// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "math"
import "cmd/compile/internal/types"

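// rewriteValueAMD64 dispatches v to the rewrite rules for its opcode.
// Generic ops that lower to exactly one AMD64 op are rewritten in place by
// swapping v.Op; everything else is handled by a per-opcode function below.
// It reports whether any rewrite was applied.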
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst(v)
	case OpAMD64BTCLconstmodify:
		return rewriteValueAMD64_OpAMD64BTCLconstmodify(v)
	case OpAMD64BTCLmodify:
		return rewriteValueAMD64_OpAMD64BTCLmodify(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTCQconstmodify:
		return rewriteValueAMD64_OpAMD64BTCQconstmodify(v)
	case OpAMD64BTCQmodify:
		return rewriteValueAMD64_OpAMD64BTCQmodify(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst(v)
	case OpAMD64BTRLconstmodify:
		return rewriteValueAMD64_OpAMD64BTRLconstmodify(v)
	case OpAMD64BTRLmodify:
		return rewriteValueAMD64_OpAMD64BTRLmodify(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTRQconstmodify:
		return rewriteValueAMD64_OpAMD64BTRQconstmodify(v)
	case OpAMD64BTRQmodify:
		return rewriteValueAMD64_OpAMD64BTRQmodify(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst(v)
	case OpAMD64BTSLconstmodify:
		return rewriteValueAMD64_OpAMD64BTSLconstmodify(v)
	case OpAMD64BTSLmodify:
		return rewriteValueAMD64_OpAMD64BTSLmodify(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64BTSQconstmodify:
		return rewriteValueAMD64_OpAMD64BTSQconstmodify(v)
	case OpAMD64BTSQmodify:
		return rewriteValueAMD64_OpAMD64BTSQmodify(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		v.Op = OpAMD64LEAQ
		return true
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd8:
		v.Op = OpAMD64ANDBlock
		return true
	case OpAtomicCompareAndSwap32:
		v.Op = OpAMD64CMPXCHGLlock
		return true
	case OpAtomicCompareAndSwap64:
		v.Op = OpAMD64CMPXCHGQlock
		return true
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicLoad32:
		v.Op = OpAMD64MOVLatomicload
		return true
	case OpAtomicLoad64:
		v.Op = OpAMD64MOVQatomicload
		return true
	case OpAtomicLoad8:
		v.Op = OpAMD64MOVBatomicload
		return true
	case OpAtomicLoadPtr:
		v.Op = OpAMD64MOVQatomicload
		return true
	case OpAtomicOr8:
		v.Op = OpAMD64ORBlock
		return true
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		v.Op = OpAMD64BSFL
		return true
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		v.Op = OpAMD64BSFL
		return true
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		v.Op = OpAMD64BSFL
		return true
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		v.Op = OpAMD64LoweredGetG
		return true
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpCopy
		return true
	case OpRound64F:
		v.Op = OpCopy
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
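// Each function below implements the rewrite rules for one opcode. Every
// "for" block is a single rule: it matches the argument pattern, checks the
// rule's side conditions, and on success rewrites v in place with v.reset.
// For commutative ops, the _i0/_i1 loops retry the match with the arguments
// swapped. The first rule below, for instance, corresponds to a rules-file
// entry of roughly this shape (the exact syntax lives in gen/AMD64.rules):
//
//	(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)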
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x (FlagEQ) [c])
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
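// rewriteValueAMD64_OpAMD64ADDL folds constants into the add, recognizes
// SHL/SHR pairs that form a rotate, strength-reduces adds of small shifts
// into LEAL1/2/4/8 address arithmetic, turns x+(-y) into SUBL, and merges a
// load into the add when the load has no other uses.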
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := v_1.AuxInt
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = c
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d == 32-c
	// result: (ROLLconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := v_0.AuxInt
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := v_1.AuxInt
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = c
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d == 16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := v_0.AuxInt
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := v_1.AuxInt
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = c
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d == 8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := v_0.AuxInt
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := v_1.AuxInt
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = c
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := v_0.AuxInt
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = c
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := v_1.AuxInt
			s := v_1.Aux
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = c
			v.Aux = s
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := l.AuxInt
			sym := l.Aux
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = off
			v.Aux = sym
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64SHLLconst || v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: int32(c) == 0
	// result: x
	for {
		c := v.AuxInt
		x := v_0
		if !(int32(c) == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [int64(int32(c+d))])
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int64(int32(c + d))
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [int64(int32(c+d))] x)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int64(int32(c + d))
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := v.AuxInt
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(base, mem)
		return true
	}
	return false
}
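// rewriteValueAMD64_OpAMD64ADDLload folds ADDQconst/LEAQ address arithmetic
// into the load's offset and symbol. Its last rule avoids a store-load round
// trip: adding a value that was just stored from an XMM register re-reads it
// directly with MOVLf2i instead of going through memory.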
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := v_1.AuxInt
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = c
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [int64(int32(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := v_1.AuxInt
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int64(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
	// cond: d == 64-c
	// result: (ROLQconst x [c])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQconst {
				continue
			}
			c := v_0.AuxInt
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQconst {
				continue
			}
			d := v_1.AuxInt
			if x != v_1.Args[0] || !(d == 64-c) {
				continue
			}
			v.reset(OpAMD64ROLQconst)
			v.AuxInt = c
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := v_0.AuxInt
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = c
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := v_1.AuxInt
			s := v_1.Aux
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = c
			v.Aux = s
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := l.AuxInt
			sym := l.Aux
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = off
			v.Aux = sym
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64SHLQconst || v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAQ [c+d] {s} x)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if v.AuxInt != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [c+d])
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = c + d
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(c+d)
	// result: (ADDQconst [c+d] x)
	for {
		c := v.AuxInt
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = c + d
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	for {
		off := v.AuxInt
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = off
		v.AddArg(x)
		return true
	}
	return false
}
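// rewriteValueAMD64_OpAMD64ADDQconstmodify folds extra address offsets into
// the AuxInt, which packs both the immediate value and the memory offset as
// a ValAndOff; canAdd guards against overflowing the packed offset.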
2304 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2305 v_1 := v.Args[1]
2306 v_0 := v.Args[0]
2307
2308
2309
2310 for {
2311 valoff1 := v.AuxInt
2312 sym := v.Aux
2313 if v_0.Op != OpAMD64ADDQconst {
2314 break
2315 }
2316 off2 := v_0.AuxInt
2317 base := v_0.Args[0]
2318 mem := v_1
2319 if !(ValAndOff(valoff1).canAdd(off2)) {
2320 break
2321 }
2322 v.reset(OpAMD64ADDQconstmodify)
2323 v.AuxInt = ValAndOff(valoff1).add(off2)
2324 v.Aux = sym
2325 v.AddArg2(base, mem)
2326 return true
2327 }
2328
2329
2330
2331 for {
2332 valoff1 := v.AuxInt
2333 sym1 := v.Aux
2334 if v_0.Op != OpAMD64LEAQ {
2335 break
2336 }
2337 off2 := v_0.AuxInt
2338 sym2 := v_0.Aux
2339 base := v_0.Args[0]
2340 mem := v_1
2341 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
2342 break
2343 }
2344 v.reset(OpAMD64ADDQconstmodify)
2345 v.AuxInt = ValAndOff(valoff1).add(off2)
2346 v.Aux = mergeSym(sym1, sym2)
2347 v.AddArg2(base, mem)
2348 return true
2349 }
2350 return false
2351 }
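// The *load rewrites fold address arithmetic into the load's offset, and
// their final rule forwards a just-stored SSE value directly: a load that
// reads back what a MOVSDstore wrote is replaced by a MOVQf2i register
// reinterpretation, skipping the memory round trip.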
2352 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2353 v_2 := v.Args[2]
2354 v_1 := v.Args[1]
2355 v_0 := v.Args[0]
2356 b := v.Block
2357 typ := &b.Func.Config.Types
2358 // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
2359 // cond: is32Bit(off1+off2)
2360 // result: (ADDQload [off1+off2] {sym} val base mem)
2361 for {
2362 off1 := v.AuxInt
2363 sym := v.Aux
2364 val := v_0
2365 if v_1.Op != OpAMD64ADDQconst {
2366 break
2367 }
2368 off2 := v_1.AuxInt
2369 base := v_1.Args[0]
2370 mem := v_2
2371 if !(is32Bit(off1 + off2)) {
2372 break
2373 }
2374 v.reset(OpAMD64ADDQload)
2375 v.AuxInt = off1 + off2
2376 v.Aux = sym
2377 v.AddArg3(val, base, mem)
2378 return true
2379 }
2380 // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2381 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2382 // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2383 for {
2384 off1 := v.AuxInt
2385 sym1 := v.Aux
2386 val := v_0
2387 if v_1.Op != OpAMD64LEAQ {
2388 break
2389 }
2390 off2 := v_1.AuxInt
2391 sym2 := v_1.Aux
2392 base := v_1.Args[0]
2393 mem := v_2
2394 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2395 break
2396 }
2397 v.reset(OpAMD64ADDQload)
2398 v.AuxInt = off1 + off2
2399 v.Aux = mergeSym(sym1, sym2)
2400 v.AddArg3(val, base, mem)
2401 return true
2402 }
2403 // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
2404 // result: (ADDQ x (MOVQf2i y))
2405 for {
2406 off := v.AuxInt
2407 sym := v.Aux
2408 x := v_0
2409 ptr := v_1
2410 if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
2411 break
2412 }
2413 y := v_2.Args[1]
2414 if ptr != v_2.Args[0] {
2415 break
2416 }
2417 v.reset(OpAMD64ADDQ)
2418 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2419 v0.AddArg(y)
2420 v.AddArg2(x, v0)
2421 return true
2422 }
2423 return false
2424 }
2425 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2426 v_2 := v.Args[2]
2427 v_1 := v.Args[1]
2428 v_0 := v.Args[0]
2429 // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2430 // cond: is32Bit(off1+off2)
2431 // result: (ADDQmodify [off1+off2] {sym} base val mem)
2432 for {
2433 off1 := v.AuxInt
2434 sym := v.Aux
2435 if v_0.Op != OpAMD64ADDQconst {
2436 break
2437 }
2438 off2 := v_0.AuxInt
2439 base := v_0.Args[0]
2440 val := v_1
2441 mem := v_2
2442 if !(is32Bit(off1 + off2)) {
2443 break
2444 }
2445 v.reset(OpAMD64ADDQmodify)
2446 v.AuxInt = off1 + off2
2447 v.Aux = sym
2448 v.AddArg3(base, val, mem)
2449 return true
2450 }
2451 // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2452 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2453 // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2454 for {
2455 off1 := v.AuxInt
2456 sym1 := v.Aux
2457 if v_0.Op != OpAMD64LEAQ {
2458 break
2459 }
2460 off2 := v_0.AuxInt
2461 sym2 := v_0.Aux
2462 base := v_0.Args[0]
2463 val := v_1
2464 mem := v_2
2465 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2466 break
2467 }
2468 v.reset(OpAMD64ADDQmodify)
2469 v.AuxInt = off1 + off2
2470 v.Aux = mergeSym(sym1, sym2)
2471 v.AddArg3(base, val, mem)
2472 return true
2473 }
2474 return false
2475 }
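// In the functions below, a loop of the form
// `for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0`
// implements commutative matching: the second iteration swaps the two
// arguments, so a rule written for one operand order also matches the
// mirrored one.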
2476 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2477 v_1 := v.Args[1]
2478 v_0 := v.Args[0]
2479 // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
2480 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2481 // result: (ADDSDload x [off] {sym} ptr mem)
2482 for {
2483 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2484 x := v_0
2485 l := v_1
2486 if l.Op != OpAMD64MOVSDload {
2487 continue
2488 }
2489 off := l.AuxInt
2490 sym := l.Aux
2491 mem := l.Args[1]
2492 ptr := l.Args[0]
2493 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2494 continue
2495 }
2496 v.reset(OpAMD64ADDSDload)
2497 v.AuxInt = off
2498 v.Aux = sym
2499 v.AddArg3(x, ptr, mem)
2500 return true
2501 }
2502 break
2503 }
2504 return false
2505 }
2506 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2507 v_2 := v.Args[2]
2508 v_1 := v.Args[1]
2509 v_0 := v.Args[0]
2510 b := v.Block
2511 typ := &b.Func.Config.Types
2512 // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
2513 // cond: is32Bit(off1+off2)
2514 // result: (ADDSDload [off1+off2] {sym} val base mem)
2515 for {
2516 off1 := v.AuxInt
2517 sym := v.Aux
2518 val := v_0
2519 if v_1.Op != OpAMD64ADDQconst {
2520 break
2521 }
2522 off2 := v_1.AuxInt
2523 base := v_1.Args[0]
2524 mem := v_2
2525 if !(is32Bit(off1 + off2)) {
2526 break
2527 }
2528 v.reset(OpAMD64ADDSDload)
2529 v.AuxInt = off1 + off2
2530 v.Aux = sym
2531 v.AddArg3(val, base, mem)
2532 return true
2533 }
2534 // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2535 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2536 // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2537 for {
2538 off1 := v.AuxInt
2539 sym1 := v.Aux
2540 val := v_0
2541 if v_1.Op != OpAMD64LEAQ {
2542 break
2543 }
2544 off2 := v_1.AuxInt
2545 sym2 := v_1.Aux
2546 base := v_1.Args[0]
2547 mem := v_2
2548 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2549 break
2550 }
2551 v.reset(OpAMD64ADDSDload)
2552 v.AuxInt = off1 + off2
2553 v.Aux = mergeSym(sym1, sym2)
2554 v.AddArg3(val, base, mem)
2555 return true
2556 }
2557 // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
2558 // result: (ADDSD x (MOVQi2f y))
2559 for {
2560 off := v.AuxInt
2561 sym := v.Aux
2562 x := v_0
2563 ptr := v_1
2564 if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym {
2565 break
2566 }
2567 y := v_2.Args[1]
2568 if ptr != v_2.Args[0] {
2569 break
2570 }
2571 v.reset(OpAMD64ADDSD)
2572 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2573 v0.AddArg(y)
2574 v.AddArg2(x, v0)
2575 return true
2576 }
2577 return false
2578 }
2579 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2580 v_1 := v.Args[1]
2581 v_0 := v.Args[0]
2582 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
2583 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2584 // result: (ADDSSload x [off] {sym} ptr mem)
2585 for {
2586 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2587 x := v_0
2588 l := v_1
2589 if l.Op != OpAMD64MOVSSload {
2590 continue
2591 }
2592 off := l.AuxInt
2593 sym := l.Aux
2594 mem := l.Args[1]
2595 ptr := l.Args[0]
2596 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2597 continue
2598 }
2599 v.reset(OpAMD64ADDSSload)
2600 v.AuxInt = off
2601 v.Aux = sym
2602 v.AddArg3(x, ptr, mem)
2603 return true
2604 }
2605 break
2606 }
2607 return false
2608 }
2609 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2610 v_2 := v.Args[2]
2611 v_1 := v.Args[1]
2612 v_0 := v.Args[0]
2613 b := v.Block
2614 typ := &b.Func.Config.Types
2615 // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
2616 // cond: is32Bit(off1+off2)
2617 // result: (ADDSSload [off1+off2] {sym} val base mem)
2618 for {
2619 off1 := v.AuxInt
2620 sym := v.Aux
2621 val := v_0
2622 if v_1.Op != OpAMD64ADDQconst {
2623 break
2624 }
2625 off2 := v_1.AuxInt
2626 base := v_1.Args[0]
2627 mem := v_2
2628 if !(is32Bit(off1 + off2)) {
2629 break
2630 }
2631 v.reset(OpAMD64ADDSSload)
2632 v.AuxInt = off1 + off2
2633 v.Aux = sym
2634 v.AddArg3(val, base, mem)
2635 return true
2636 }
2637 // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2638 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2639 // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2640 for {
2641 off1 := v.AuxInt
2642 sym1 := v.Aux
2643 val := v_0
2644 if v_1.Op != OpAMD64LEAQ {
2645 break
2646 }
2647 off2 := v_1.AuxInt
2648 sym2 := v_1.Aux
2649 base := v_1.Args[0]
2650 mem := v_2
2651 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2652 break
2653 }
2654 v.reset(OpAMD64ADDSSload)
2655 v.AuxInt = off1 + off2
2656 v.Aux = mergeSym(sym1, sym2)
2657 v.AddArg3(val, base, mem)
2658 return true
2659 }
2660 // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
2661 // result: (ADDSS x (MOVLi2f y))
2662 for {
2663 off := v.AuxInt
2664 sym := v.Aux
2665 x := v_0
2666 ptr := v_1
2667 if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym {
2668 break
2669 }
2670 y := v_2.Args[1]
2671 if ptr != v_2.Args[0] {
2672 break
2673 }
2674 v.reset(OpAMD64ADDSS)
2675 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2676 v0.AddArg(y)
2677 v.AddArg2(x, v0)
2678 return true
2679 }
2680 return false
2681 }
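// The AND rules recognize single-bit clears and lower them to BTR: either
// the explicit a &^ (1 << b) pattern (NOTL of a shifted 1) or an AND with
// a constant whose complement is a power of two. The uint64(^c) >= 128
// guard likely reflects encoding size: for smaller masks the
// AND-with-immediate form is already at least as compact as BTR.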
2682 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2683 v_1 := v.Args[1]
2684 v_0 := v.Args[0]
2685 // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
2686 // result: (BTRL x y)
2687 for {
2688 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2689 if v_0.Op != OpAMD64NOTL {
2690 continue
2691 }
2692 v_0_0 := v_0.Args[0]
2693 if v_0_0.Op != OpAMD64SHLL {
2694 continue
2695 }
2696 y := v_0_0.Args[1]
2697 v_0_0_0 := v_0_0.Args[0]
2698 if v_0_0_0.Op != OpAMD64MOVLconst || v_0_0_0.AuxInt != 1 {
2699 continue
2700 }
2701 x := v_1
2702 v.reset(OpAMD64BTRL)
2703 v.AddArg2(x, y)
2704 return true
2705 }
2706 break
2707 }
2708 // match: (ANDL (MOVLconst [c]) x)
2709 // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128
2710 // result: (BTRLconst [log2uint32(^c)] x)
2711 for {
2712 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2713 if v_0.Op != OpAMD64MOVLconst {
2714 continue
2715 }
2716 c := v_0.AuxInt
2717 x := v_1
2718 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) {
2719 continue
2720 }
2721 v.reset(OpAMD64BTRLconst)
2722 v.AuxInt = log2uint32(^c)
2723 v.AddArg(x)
2724 return true
2725 }
2726 break
2727 }
2728 // match: (ANDL x (MOVLconst [c]))
2729 // result: (ANDLconst [c] x)
2730 for {
2731 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2732 x := v_0
2733 if v_1.Op != OpAMD64MOVLconst {
2734 continue
2735 }
2736 c := v_1.AuxInt
2737 v.reset(OpAMD64ANDLconst)
2738 v.AuxInt = c
2739 v.AddArg(x)
2740 return true
2741 }
2742 break
2743 }
2744 // match: (ANDL x x)
2745 // result: x
2746 for {
2747 x := v_0
2748 if x != v_1 {
2749 break
2750 }
2751 v.copyOf(x)
2752 return true
2753 }
2754 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
2755 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2756 // result: (ANDLload x [off] {sym} ptr mem)
2757 for {
2758 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2759 x := v_0
2760 l := v_1
2761 if l.Op != OpAMD64MOVLload {
2762 continue
2763 }
2764 off := l.AuxInt
2765 sym := l.Aux
2766 mem := l.Args[1]
2767 ptr := l.Args[0]
2768 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2769 continue
2770 }
2771 v.reset(OpAMD64ANDLload)
2772 v.AuxInt = off
2773 v.Aux = sym
2774 v.AddArg3(x, ptr, mem)
2775 return true
2776 }
2777 break
2778 }
2779 return false
2780 }
2781 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2782 v_0 := v.Args[0]
2783 // match: (ANDLconst [c] x)
2784 // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128
2785 // result: (BTRLconst [log2uint32(^c)] x)
2786 for {
2787 c := v.AuxInt
2788 x := v_0
2789 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128) {
2790 break
2791 }
2792 v.reset(OpAMD64BTRLconst)
2793 v.AuxInt = log2uint32(^c)
2794 v.AddArg(x)
2795 return true
2796 }
2797 // match: (ANDLconst [c] (ANDLconst [d] x))
2798 // result: (ANDLconst [c & d] x)
2799 for {
2800 c := auxIntToInt32(v.AuxInt)
2801 if v_0.Op != OpAMD64ANDLconst {
2802 break
2803 }
2804 d := auxIntToInt32(v_0.AuxInt)
2805 x := v_0.Args[0]
2806 v.reset(OpAMD64ANDLconst)
2807 v.AuxInt = int32ToAuxInt(c & d)
2808 v.AddArg(x)
2809 return true
2810 }
2811 // match: (ANDLconst [c] (BTRLconst [d] x))
2812 // result: (ANDLconst [c &^ (1 << uint32(d))] x)
2813 for {
2814 c := auxIntToInt32(v.AuxInt)
2815 if v_0.Op != OpAMD64BTRLconst {
2816 break
2817 }
2818 d := auxIntToInt8(v_0.AuxInt)
2819 x := v_0.Args[0]
2820 v.reset(OpAMD64ANDLconst)
2821 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
2822 v.AddArg(x)
2823 return true
2824 }
2825 // match: (ANDLconst [0xFF] x)
2826 // result: (MOVBQZX x)
2827 for {
2828 if v.AuxInt != 0xFF {
2829 break
2830 }
2831 x := v_0
2832 v.reset(OpAMD64MOVBQZX)
2833 v.AddArg(x)
2834 return true
2835 }
2836 // match: (ANDLconst [0xFFFF] x)
2837 // result: (MOVWQZX x)
2838 for {
2839 if v.AuxInt != 0xFFFF {
2840 break
2841 }
2842 x := v_0
2843 v.reset(OpAMD64MOVWQZX)
2844 v.AddArg(x)
2845 return true
2846 }
2847 // match: (ANDLconst [c] _)
2848 // cond: int32(c)==0
2849 // result: (MOVLconst [0])
2850 for {
2851 c := v.AuxInt
2852 if !(int32(c) == 0) {
2853 break
2854 }
2855 v.reset(OpAMD64MOVLconst)
2856 v.AuxInt = 0
2857 return true
2858 }
2859 // match: (ANDLconst [c] x)
2860 // cond: int32(c)==-1
2861 // result: x
2862 for {
2863 c := v.AuxInt
2864 x := v_0
2865 if !(int32(c) == -1) {
2866 break
2867 }
2868 v.copyOf(x)
2869 return true
2870 }
2871 // match: (ANDLconst [c] (MOVLconst [d]))
2872 // result: (MOVLconst [c&d])
2873 for {
2874 c := v.AuxInt
2875 if v_0.Op != OpAMD64MOVLconst {
2876 break
2877 }
2878 d := v_0.AuxInt
2879 v.reset(OpAMD64MOVLconst)
2880 v.AuxInt = c & d
2881 return true
2882 }
2883 return false
2884 }
2885 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2886 v_1 := v.Args[1]
2887 v_0 := v.Args[0]
2888 // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2889 // cond: ValAndOff(valoff1).canAdd(off2)
2890 // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
2891 for {
2892 valoff1 := v.AuxInt
2893 sym := v.Aux
2894 if v_0.Op != OpAMD64ADDQconst {
2895 break
2896 }
2897 off2 := v_0.AuxInt
2898 base := v_0.Args[0]
2899 mem := v_1
2900 if !(ValAndOff(valoff1).canAdd(off2)) {
2901 break
2902 }
2903 v.reset(OpAMD64ANDLconstmodify)
2904 v.AuxInt = ValAndOff(valoff1).add(off2)
2905 v.Aux = sym
2906 v.AddArg2(base, mem)
2907 return true
2908 }
2909 // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2910 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
2911 // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
2912 for {
2913 valoff1 := v.AuxInt
2914 sym1 := v.Aux
2915 if v_0.Op != OpAMD64LEAQ {
2916 break
2917 }
2918 off2 := v_0.AuxInt
2919 sym2 := v_0.Aux
2920 base := v_0.Args[0]
2921 mem := v_1
2922 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
2923 break
2924 }
2925 v.reset(OpAMD64ANDLconstmodify)
2926 v.AuxInt = ValAndOff(valoff1).add(off2)
2927 v.Aux = mergeSym(sym1, sym2)
2928 v.AddArg2(base, mem)
2929 return true
2930 }
2931 return false
2932 }
2933 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2934 v_2 := v.Args[2]
2935 v_1 := v.Args[1]
2936 v_0 := v.Args[0]
2937 b := v.Block
2938 typ := &b.Func.Config.Types
2939 // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
2940 // cond: is32Bit(off1+off2)
2941 // result: (ANDLload [off1+off2] {sym} val base mem)
2942 for {
2943 off1 := v.AuxInt
2944 sym := v.Aux
2945 val := v_0
2946 if v_1.Op != OpAMD64ADDQconst {
2947 break
2948 }
2949 off2 := v_1.AuxInt
2950 base := v_1.Args[0]
2951 mem := v_2
2952 if !(is32Bit(off1 + off2)) {
2953 break
2954 }
2955 v.reset(OpAMD64ANDLload)
2956 v.AuxInt = off1 + off2
2957 v.Aux = sym
2958 v.AddArg3(val, base, mem)
2959 return true
2960 }
2961 // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2962 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2963 // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2964 for {
2965 off1 := v.AuxInt
2966 sym1 := v.Aux
2967 val := v_0
2968 if v_1.Op != OpAMD64LEAQ {
2969 break
2970 }
2971 off2 := v_1.AuxInt
2972 sym2 := v_1.Aux
2973 base := v_1.Args[0]
2974 mem := v_2
2975 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2976 break
2977 }
2978 v.reset(OpAMD64ANDLload)
2979 v.AuxInt = off1 + off2
2980 v.Aux = mergeSym(sym1, sym2)
2981 v.AddArg3(val, base, mem)
2982 return true
2983 }
2984 // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
2985 // result: (ANDL x (MOVLf2i y))
2986 for {
2987 off := v.AuxInt
2988 sym := v.Aux
2989 x := v_0
2990 ptr := v_1
2991 if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym {
2992 break
2993 }
2994 y := v_2.Args[1]
2995 if ptr != v_2.Args[0] {
2996 break
2997 }
2998 v.reset(OpAMD64ANDL)
2999 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
3000 v0.AddArg(y)
3001 v.AddArg2(x, v0)
3002 return true
3003 }
3004 return false
3005 }
3006 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
3007 v_2 := v.Args[2]
3008 v_1 := v.Args[1]
3009 v_0 := v.Args[0]
3010 // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3011 // cond: is32Bit(off1+off2)
3012 // result: (ANDLmodify [off1+off2] {sym} base val mem)
3013 for {
3014 off1 := v.AuxInt
3015 sym := v.Aux
3016 if v_0.Op != OpAMD64ADDQconst {
3017 break
3018 }
3019 off2 := v_0.AuxInt
3020 base := v_0.Args[0]
3021 val := v_1
3022 mem := v_2
3023 if !(is32Bit(off1 + off2)) {
3024 break
3025 }
3026 v.reset(OpAMD64ANDLmodify)
3027 v.AuxInt = off1 + off2
3028 v.Aux = sym
3029 v.AddArg3(base, val, mem)
3030 return true
3031 }
3032 // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3033 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3034 // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3035 for {
3036 off1 := v.AuxInt
3037 sym1 := v.Aux
3038 if v_0.Op != OpAMD64LEAQ {
3039 break
3040 }
3041 off2 := v_0.AuxInt
3042 sym2 := v_0.Aux
3043 base := v_0.Args[0]
3044 val := v_1
3045 mem := v_2
3046 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3047 break
3048 }
3049 v.reset(OpAMD64ANDLmodify)
3050 v.AuxInt = off1 + off2
3051 v.Aux = mergeSym(sym1, sym2)
3052 v.AddArg3(base, val, mem)
3053 return true
3054 }
3055 return false
3056 }
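// The 64-bit ANDQ rules mirror the 32-bit ANDL rules above, with extra
// is32Bit guards: AMD64 ALU immediates are 32 bits sign-extended, so a
// constant can only move into the *const form when it fits in 32 bits.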
3057 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3058 v_1 := v.Args[1]
3059 v_0 := v.Args[0]
3060 // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
3061 // result: (BTRQ x y)
3062 for {
3063 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3064 if v_0.Op != OpAMD64NOTQ {
3065 continue
3066 }
3067 v_0_0 := v_0.Args[0]
3068 if v_0_0.Op != OpAMD64SHLQ {
3069 continue
3070 }
3071 y := v_0_0.Args[1]
3072 v_0_0_0 := v_0_0.Args[0]
3073 if v_0_0_0.Op != OpAMD64MOVQconst || v_0_0_0.AuxInt != 1 {
3074 continue
3075 }
3076 x := v_1
3077 v.reset(OpAMD64BTRQ)
3078 v.AddArg2(x, y)
3079 return true
3080 }
3081 break
3082 }
3083 // match: (ANDQ (MOVQconst [c]) x)
3084 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
3085 // result: (BTRQconst [log2(^c)] x)
3086 for {
3087 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3088 if v_0.Op != OpAMD64MOVQconst {
3089 continue
3090 }
3091 c := v_0.AuxInt
3092 x := v_1
3093 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
3094 continue
3095 }
3096 v.reset(OpAMD64BTRQconst)
3097 v.AuxInt = log2(^c)
3098 v.AddArg(x)
3099 return true
3100 }
3101 break
3102 }
3103 // match: (ANDQ x (MOVQconst [c]))
3104 // cond: is32Bit(c)
3105 // result: (ANDQconst [c] x)
3106 for {
3107 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3108 x := v_0
3109 if v_1.Op != OpAMD64MOVQconst {
3110 continue
3111 }
3112 c := v_1.AuxInt
3113 if !(is32Bit(c)) {
3114 continue
3115 }
3116 v.reset(OpAMD64ANDQconst)
3117 v.AuxInt = c
3118 v.AddArg(x)
3119 return true
3120 }
3121 break
3122 }
3123 // match: (ANDQ x x)
3124 // result: x
3125 for {
3126 x := v_0
3127 if x != v_1 {
3128 break
3129 }
3130 v.copyOf(x)
3131 return true
3132 }
3133 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
3134 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3135 // result: (ANDQload x [off] {sym} ptr mem)
3136 for {
3137 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3138 x := v_0
3139 l := v_1
3140 if l.Op != OpAMD64MOVQload {
3141 continue
3142 }
3143 off := l.AuxInt
3144 sym := l.Aux
3145 mem := l.Args[1]
3146 ptr := l.Args[0]
3147 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3148 continue
3149 }
3150 v.reset(OpAMD64ANDQload)
3151 v.AuxInt = off
3152 v.Aux = sym
3153 v.AddArg3(x, ptr, mem)
3154 return true
3155 }
3156 break
3157 }
3158 return false
3159 }
3160 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3161 v_0 := v.Args[0]
3162 // match: (ANDQconst [c] x)
3163 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
3164 // result: (BTRQconst [log2(^c)] x)
3165 for {
3166 c := v.AuxInt
3167 x := v_0
3168 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
3169 break
3170 }
3171 v.reset(OpAMD64BTRQconst)
3172 v.AuxInt = log2(^c)
3173 v.AddArg(x)
3174 return true
3175 }
3176 // match: (ANDQconst [c] (ANDQconst [d] x))
3177 // result: (ANDQconst [c & d] x)
3178 for {
3179 c := auxIntToInt32(v.AuxInt)
3180 if v_0.Op != OpAMD64ANDQconst {
3181 break
3182 }
3183 d := auxIntToInt32(v_0.AuxInt)
3184 x := v_0.Args[0]
3185 v.reset(OpAMD64ANDQconst)
3186 v.AuxInt = int32ToAuxInt(c & d)
3187 v.AddArg(x)
3188 return true
3189 }
3190 // match: (ANDQconst [c] (BTRQconst [d] x))
3191 // cond: is32Bit(int64(c) &^ (1 << uint32(d)))
3192 // result: (ANDQconst [c &^ (1 << uint32(d))] x)
3193 for {
3194 c := auxIntToInt32(v.AuxInt)
3195 if v_0.Op != OpAMD64BTRQconst {
3196 break
3197 }
3198 d := auxIntToInt8(v_0.AuxInt)
3199 x := v_0.Args[0]
3200 if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
3201 break
3202 }
3203 v.reset(OpAMD64ANDQconst)
3204 v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
3205 v.AddArg(x)
3206 return true
3207 }
3208 // match: (ANDQconst [0xFF] x)
3209 // result: (MOVBQZX x)
3210 for {
3211 if v.AuxInt != 0xFF {
3212 break
3213 }
3214 x := v_0
3215 v.reset(OpAMD64MOVBQZX)
3216 v.AddArg(x)
3217 return true
3218 }
3219 // match: (ANDQconst [0xFFFF] x)
3220 // result: (MOVWQZX x)
3221 for {
3222 if v.AuxInt != 0xFFFF {
3223 break
3224 }
3225 x := v_0
3226 v.reset(OpAMD64MOVWQZX)
3227 v.AddArg(x)
3228 return true
3229 }
3230 // match: (ANDQconst [0xFFFFFFFF] x)
3231 // result: (MOVLQZX x)
3232 for {
3233 if v.AuxInt != 0xFFFFFFFF {
3234 break
3235 }
3236 x := v_0
3237 v.reset(OpAMD64MOVLQZX)
3238 v.AddArg(x)
3239 return true
3240 }
3241 // match: (ANDQconst [0] _)
3242 // result: (MOVQconst [0])
3243 for {
3244 if v.AuxInt != 0 {
3245 break
3246 }
3247 v.reset(OpAMD64MOVQconst)
3248 v.AuxInt = 0
3249 return true
3250 }
3251 // match: (ANDQconst [-1] x)
3252 // result: x
3253 for {
3254 if v.AuxInt != -1 {
3255 break
3256 }
3257 x := v_0
3258 v.copyOf(x)
3259 return true
3260 }
3261 // match: (ANDQconst [c] (MOVQconst [d]))
3262 // result: (MOVQconst [c&d])
3263 for {
3264 c := v.AuxInt
3265 if v_0.Op != OpAMD64MOVQconst {
3266 break
3267 }
3268 d := v_0.AuxInt
3269 v.reset(OpAMD64MOVQconst)
3270 v.AuxInt = c & d
3271 return true
3272 }
3273 return false
3274 }
3275 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3276 v_1 := v.Args[1]
3277 v_0 := v.Args[0]
3278 // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3279 // cond: ValAndOff(valoff1).canAdd(off2)
3280 // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
3281 for {
3282 valoff1 := v.AuxInt
3283 sym := v.Aux
3284 if v_0.Op != OpAMD64ADDQconst {
3285 break
3286 }
3287 off2 := v_0.AuxInt
3288 base := v_0.Args[0]
3289 mem := v_1
3290 if !(ValAndOff(valoff1).canAdd(off2)) {
3291 break
3292 }
3293 v.reset(OpAMD64ANDQconstmodify)
3294 v.AuxInt = ValAndOff(valoff1).add(off2)
3295 v.Aux = sym
3296 v.AddArg2(base, mem)
3297 return true
3298 }
3299 // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3300 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
3301 // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
3302 for {
3303 valoff1 := v.AuxInt
3304 sym1 := v.Aux
3305 if v_0.Op != OpAMD64LEAQ {
3306 break
3307 }
3308 off2 := v_0.AuxInt
3309 sym2 := v_0.Aux
3310 base := v_0.Args[0]
3311 mem := v_1
3312 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
3313 break
3314 }
3315 v.reset(OpAMD64ANDQconstmodify)
3316 v.AuxInt = ValAndOff(valoff1).add(off2)
3317 v.Aux = mergeSym(sym1, sym2)
3318 v.AddArg2(base, mem)
3319 return true
3320 }
3321 return false
3322 }
3323 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3324 v_2 := v.Args[2]
3325 v_1 := v.Args[1]
3326 v_0 := v.Args[0]
3327 b := v.Block
3328 typ := &b.Func.Config.Types
3329 // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
3330 // cond: is32Bit(off1+off2)
3331 // result: (ANDQload [off1+off2] {sym} val base mem)
3332 for {
3333 off1 := v.AuxInt
3334 sym := v.Aux
3335 val := v_0
3336 if v_1.Op != OpAMD64ADDQconst {
3337 break
3338 }
3339 off2 := v_1.AuxInt
3340 base := v_1.Args[0]
3341 mem := v_2
3342 if !(is32Bit(off1 + off2)) {
3343 break
3344 }
3345 v.reset(OpAMD64ANDQload)
3346 v.AuxInt = off1 + off2
3347 v.Aux = sym
3348 v.AddArg3(val, base, mem)
3349 return true
3350 }
3351 // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3352 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3353 // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3354 for {
3355 off1 := v.AuxInt
3356 sym1 := v.Aux
3357 val := v_0
3358 if v_1.Op != OpAMD64LEAQ {
3359 break
3360 }
3361 off2 := v_1.AuxInt
3362 sym2 := v_1.Aux
3363 base := v_1.Args[0]
3364 mem := v_2
3365 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3366 break
3367 }
3368 v.reset(OpAMD64ANDQload)
3369 v.AuxInt = off1 + off2
3370 v.Aux = mergeSym(sym1, sym2)
3371 v.AddArg3(val, base, mem)
3372 return true
3373 }
3374 // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
3375 // result: (ANDQ x (MOVQf2i y))
3376 for {
3377 off := v.AuxInt
3378 sym := v.Aux
3379 x := v_0
3380 ptr := v_1
3381 if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym {
3382 break
3383 }
3384 y := v_2.Args[1]
3385 if ptr != v_2.Args[0] {
3386 break
3387 }
3388 v.reset(OpAMD64ANDQ)
3389 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3390 v0.AddArg(y)
3391 v.AddArg2(x, v0)
3392 return true
3393 }
3394 return false
3395 }
3396 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3397 v_2 := v.Args[2]
3398 v_1 := v.Args[1]
3399 v_0 := v.Args[0]
3400 // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3401 // cond: is32Bit(off1+off2)
3402 // result: (ANDQmodify [off1+off2] {sym} base val mem)
3403 for {
3404 off1 := v.AuxInt
3405 sym := v.Aux
3406 if v_0.Op != OpAMD64ADDQconst {
3407 break
3408 }
3409 off2 := v_0.AuxInt
3410 base := v_0.Args[0]
3411 val := v_1
3412 mem := v_2
3413 if !(is32Bit(off1 + off2)) {
3414 break
3415 }
3416 v.reset(OpAMD64ANDQmodify)
3417 v.AuxInt = off1 + off2
3418 v.Aux = sym
3419 v.AddArg3(base, val, mem)
3420 return true
3421 }
3422 // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3423 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3424 // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3425 for {
3426 off1 := v.AuxInt
3427 sym1 := v.Aux
3428 if v_0.Op != OpAMD64LEAQ {
3429 break
3430 }
3431 off2 := v_0.AuxInt
3432 sym2 := v_0.Aux
3433 base := v_0.Args[0]
3434 val := v_1
3435 mem := v_2
3436 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3437 break
3438 }
3439 v.reset(OpAMD64ANDQmodify)
3440 v.AuxInt = off1 + off2
3441 v.Aux = mergeSym(sym1, sym2)
3442 v.AddArg3(base, val, mem)
3443 return true
3444 }
3445 return false
3446 }
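// BSFQ leaves its result undefined for a zero input, so callers (e.g. the
// Ctz8/Ctz16 lowerings) OR in a guard bit just above the operand's width.
// The rules below exploit that guard: with bit 8 (or 16) forced on, the
// byte (or word) zero-extension inside the ORQconst can never affect the
// BSFQ result and is dropped.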
3447 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3448 v_0 := v.Args[0]
3449 b := v.Block
3450 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
3451 // result: (BSFQ (ORQconst <t> [1<<8] x))
3452 for {
3453 if v_0.Op != OpAMD64ORQconst {
3454 break
3455 }
3456 t := v_0.Type
3457 if v_0.AuxInt != 1<<8 {
3458 break
3459 }
3460 v_0_0 := v_0.Args[0]
3461 if v_0_0.Op != OpAMD64MOVBQZX {
3462 break
3463 }
3464 x := v_0_0.Args[0]
3465 v.reset(OpAMD64BSFQ)
3466 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3467 v0.AuxInt = 1 << 8
3468 v0.AddArg(x)
3469 v.AddArg(v0)
3470 return true
3471 }
3472 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
3473 // result: (BSFQ (ORQconst <t> [1<<16] x))
3474 for {
3475 if v_0.Op != OpAMD64ORQconst {
3476 break
3477 }
3478 t := v_0.Type
3479 if v_0.AuxInt != 1<<16 {
3480 break
3481 }
3482 v_0_0 := v_0.Args[0]
3483 if v_0_0.Op != OpAMD64MOVWQZX {
3484 break
3485 }
3486 x := v_0_0.Args[0]
3487 v.reset(OpAMD64BSFQ)
3488 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3489 v0.AuxInt = 1 << 16
3490 v0.AddArg(x)
3491 v.AddArg(v0)
3492 return true
3493 }
3494 return false
3495 }
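// BTCLconst toggles a single bit, i.e. it is an XOR with 1<<c. The rules
// below therefore fold chains of BTC/XOR constants into one XORLconst,
// and evaluate the operation outright when applied to a MOVLconst.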
3496 func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
3497 v_0 := v.Args[0]
3498 // match: (BTCLconst [c] (XORLconst [d] x))
3499 // result: (XORLconst [d ^ 1<<uint32(c)] x)
3500 for {
3501 c := auxIntToInt8(v.AuxInt)
3502 if v_0.Op != OpAMD64XORLconst {
3503 break
3504 }
3505 d := auxIntToInt32(v_0.AuxInt)
3506 x := v_0.Args[0]
3507 v.reset(OpAMD64XORLconst)
3508 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3509 v.AddArg(x)
3510 return true
3511 }
3512 // match: (BTCLconst [c] (BTCLconst [d] x))
3513 // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
3514 for {
3515 c := auxIntToInt8(v.AuxInt)
3516 if v_0.Op != OpAMD64BTCLconst {
3517 break
3518 }
3519 d := auxIntToInt8(v_0.AuxInt)
3520 x := v_0.Args[0]
3521 v.reset(OpAMD64XORLconst)
3522 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
3523 v.AddArg(x)
3524 return true
3525 }
3526 // match: (BTCLconst [c] (MOVLconst [d]))
3527 // result: (MOVLconst [d^(1<<uint32(c))])
3528 for {
3529 c := auxIntToInt8(v.AuxInt)
3530 if v_0.Op != OpAMD64MOVLconst {
3531 break
3532 }
3533 d := auxIntToInt32(v_0.AuxInt)
3534 v.reset(OpAMD64MOVLconst)
3535 v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
3536 return true
3537 }
3538 return false
3539 }
3540 func rewriteValueAMD64_OpAMD64BTCLconstmodify(v *Value) bool {
3541 v_1 := v.Args[1]
3542 v_0 := v.Args[0]
3543 // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3544 // cond: ValAndOff(valoff1).canAdd(off2)
3545 // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
3546 for {
3547 valoff1 := v.AuxInt
3548 sym := v.Aux
3549 if v_0.Op != OpAMD64ADDQconst {
3550 break
3551 }
3552 off2 := v_0.AuxInt
3553 base := v_0.Args[0]
3554 mem := v_1
3555 if !(ValAndOff(valoff1).canAdd(off2)) {
3556 break
3557 }
3558 v.reset(OpAMD64BTCLconstmodify)
3559 v.AuxInt = ValAndOff(valoff1).add(off2)
3560 v.Aux = sym
3561 v.AddArg2(base, mem)
3562 return true
3563 }
3564 // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3565 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
3566 // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
3567 for {
3568 valoff1 := v.AuxInt
3569 sym1 := v.Aux
3570 if v_0.Op != OpAMD64LEAQ {
3571 break
3572 }
3573 off2 := v_0.AuxInt
3574 sym2 := v_0.Aux
3575 base := v_0.Args[0]
3576 mem := v_1
3577 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
3578 break
3579 }
3580 v.reset(OpAMD64BTCLconstmodify)
3581 v.AuxInt = ValAndOff(valoff1).add(off2)
3582 v.Aux = mergeSym(sym1, sym2)
3583 v.AddArg2(base, mem)
3584 return true
3585 }
3586 return false
3587 }
3588 func rewriteValueAMD64_OpAMD64BTCLmodify(v *Value) bool {
3589 v_2 := v.Args[2]
3590 v_1 := v.Args[1]
3591 v_0 := v.Args[0]
3592 // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3593 // cond: is32Bit(off1+off2)
3594 // result: (BTCLmodify [off1+off2] {sym} base val mem)
3595 for {
3596 off1 := v.AuxInt
3597 sym := v.Aux
3598 if v_0.Op != OpAMD64ADDQconst {
3599 break
3600 }
3601 off2 := v_0.AuxInt
3602 base := v_0.Args[0]
3603 val := v_1
3604 mem := v_2
3605 if !(is32Bit(off1 + off2)) {
3606 break
3607 }
3608 v.reset(OpAMD64BTCLmodify)
3609 v.AuxInt = off1 + off2
3610 v.Aux = sym
3611 v.AddArg3(base, val, mem)
3612 return true
3613 }
3614 // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3615 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3616 // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3617 for {
3618 off1 := v.AuxInt
3619 sym1 := v.Aux
3620 if v_0.Op != OpAMD64LEAQ {
3621 break
3622 }
3623 off2 := v_0.AuxInt
3624 sym2 := v_0.Aux
3625 base := v_0.Args[0]
3626 val := v_1
3627 mem := v_2
3628 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3629 break
3630 }
3631 v.reset(OpAMD64BTCLmodify)
3632 v.AuxInt = off1 + off2
3633 v.Aux = mergeSym(sym1, sym2)
3634 v.AddArg3(base, val, mem)
3635 return true
3636 }
3637 return false
3638 }
3639 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3640 v_0 := v.Args[0]
3641 // match: (BTCQconst [c] (XORQconst [d] x))
3642 // cond: is32Bit(int64(d) ^ 1<<uint32(c))
3643 // result: (XORQconst [d ^ 1<<uint32(c)] x)
3644 for {
3645 c := auxIntToInt8(v.AuxInt)
3646 if v_0.Op != OpAMD64XORQconst {
3647 break
3648 }
3649 d := auxIntToInt32(v_0.AuxInt)
3650 x := v_0.Args[0]
3651 if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
3652 break
3653 }
3654 v.reset(OpAMD64XORQconst)
3655 v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
3656 v.AddArg(x)
3657 return true
3658 }
3659 // match: (BTCQconst [c] (BTCQconst [d] x))
3660 // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
3661 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
3662 for {
3663 c := auxIntToInt8(v.AuxInt)
3664 if v_0.Op != OpAMD64BTCQconst {
3665 break
3666 }
3667 d := auxIntToInt8(v_0.AuxInt)
3668 x := v_0.Args[0]
3669 if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
3670 break
3671 }
3672 v.reset(OpAMD64XORQconst)
3673 v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
3674 v.AddArg(x)
3675 return true
3676 }
3677 // match: (BTCQconst [c] (MOVQconst [d]))
3678 // result: (MOVQconst [d^(1<<uint32(c))])
3679 for {
3680 c := v.AuxInt
3681 if v_0.Op != OpAMD64MOVQconst {
3682 break
3683 }
3684 d := v_0.AuxInt
3685 v.reset(OpAMD64MOVQconst)
3686 v.AuxInt = d ^ (1 << uint32(c))
3687 return true
3688 }
3689 return false
3690 }
3691 func rewriteValueAMD64_OpAMD64BTCQconstmodify(v *Value) bool {
3692 v_1 := v.Args[1]
3693 v_0 := v.Args[0]
3694 // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3695 // cond: ValAndOff(valoff1).canAdd(off2)
3696 // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
3697 for {
3698 valoff1 := v.AuxInt
3699 sym := v.Aux
3700 if v_0.Op != OpAMD64ADDQconst {
3701 break
3702 }
3703 off2 := v_0.AuxInt
3704 base := v_0.Args[0]
3705 mem := v_1
3706 if !(ValAndOff(valoff1).canAdd(off2)) {
3707 break
3708 }
3709 v.reset(OpAMD64BTCQconstmodify)
3710 v.AuxInt = ValAndOff(valoff1).add(off2)
3711 v.Aux = sym
3712 v.AddArg2(base, mem)
3713 return true
3714 }
3715 // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3716 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
3717 // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
3718 for {
3719 valoff1 := v.AuxInt
3720 sym1 := v.Aux
3721 if v_0.Op != OpAMD64LEAQ {
3722 break
3723 }
3724 off2 := v_0.AuxInt
3725 sym2 := v_0.Aux
3726 base := v_0.Args[0]
3727 mem := v_1
3728 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
3729 break
3730 }
3731 v.reset(OpAMD64BTCQconstmodify)
3732 v.AuxInt = ValAndOff(valoff1).add(off2)
3733 v.Aux = mergeSym(sym1, sym2)
3734 v.AddArg2(base, mem)
3735 return true
3736 }
3737 return false
3738 }
3739 func rewriteValueAMD64_OpAMD64BTCQmodify(v *Value) bool {
3740 v_2 := v.Args[2]
3741 v_1 := v.Args[1]
3742 v_0 := v.Args[0]
3743 // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3744 // cond: is32Bit(off1+off2)
3745 // result: (BTCQmodify [off1+off2] {sym} base val mem)
3746 for {
3747 off1 := v.AuxInt
3748 sym := v.Aux
3749 if v_0.Op != OpAMD64ADDQconst {
3750 break
3751 }
3752 off2 := v_0.AuxInt
3753 base := v_0.Args[0]
3754 val := v_1
3755 mem := v_2
3756 if !(is32Bit(off1 + off2)) {
3757 break
3758 }
3759 v.reset(OpAMD64BTCQmodify)
3760 v.AuxInt = off1 + off2
3761 v.Aux = sym
3762 v.AddArg3(base, val, mem)
3763 return true
3764 }
3765 // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3766 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3767 // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3768 for {
3769 off1 := v.AuxInt
3770 sym1 := v.Aux
3771 if v_0.Op != OpAMD64LEAQ {
3772 break
3773 }
3774 off2 := v_0.AuxInt
3775 sym2 := v_0.Aux
3776 base := v_0.Args[0]
3777 val := v_1
3778 mem := v_2
3779 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3780 break
3781 }
3782 v.reset(OpAMD64BTCQmodify)
3783 v.AuxInt = off1 + off2
3784 v.Aux = mergeSym(sym1, sym2)
3785 v.AddArg3(base, val, mem)
3786 return true
3787 }
3788 return false
3789 }
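// Bit-test folding: testing bit c of x>>d is the same as testing bit c+d
// of x, and testing bit c of x<<d (for c > d) is testing bit c-d; a test
// of bit 0 of a variable shift x>>y becomes a variable bit test BT(y, x).
// For example, (BTLconst [2] (SHRQconst [3] x)) tests bit 5 of x.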
3790 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3791 v_0 := v.Args[0]
3792 // match: (BTLconst [c] (SHRQconst [d] x))
3793 // cond: (c+d)<64
3794 // result: (BTQconst [c+d] x)
3795 for {
3796 c := v.AuxInt
3797 if v_0.Op != OpAMD64SHRQconst {
3798 break
3799 }
3800 d := v_0.AuxInt
3801 x := v_0.Args[0]
3802 if !((c + d) < 64) {
3803 break
3804 }
3805 v.reset(OpAMD64BTQconst)
3806 v.AuxInt = c + d
3807 v.AddArg(x)
3808 return true
3809 }
3810 // match: (BTLconst [c] (SHLQconst [d] x))
3811 // cond: c>d
3812 // result: (BTLconst [c-d] x)
3813 for {
3814 c := v.AuxInt
3815 if v_0.Op != OpAMD64SHLQconst {
3816 break
3817 }
3818 d := v_0.AuxInt
3819 x := v_0.Args[0]
3820 if !(c > d) {
3821 break
3822 }
3823 v.reset(OpAMD64BTLconst)
3824 v.AuxInt = c - d
3825 v.AddArg(x)
3826 return true
3827 }
3828 // match: (BTLconst [0] s:(SHRQ x y))
3829 // result: (BTQ y x)
3830 for {
3831 if v.AuxInt != 0 {
3832 break
3833 }
3834 s := v_0
3835 if s.Op != OpAMD64SHRQ {
3836 break
3837 }
3838 y := s.Args[1]
3839 x := s.Args[0]
3840 v.reset(OpAMD64BTQ)
3841 v.AddArg2(y, x)
3842 return true
3843 }
3844 // match: (BTLconst [c] (SHRLconst [d] x))
3845 // cond: (c+d)<32
3846 // result: (BTLconst [c+d] x)
3847 for {
3848 c := v.AuxInt
3849 if v_0.Op != OpAMD64SHRLconst {
3850 break
3851 }
3852 d := v_0.AuxInt
3853 x := v_0.Args[0]
3854 if !((c + d) < 32) {
3855 break
3856 }
3857 v.reset(OpAMD64BTLconst)
3858 v.AuxInt = c + d
3859 v.AddArg(x)
3860 return true
3861 }
3862 // match: (BTLconst [c] (SHLLconst [d] x))
3863 // cond: c>d
3864 // result: (BTLconst [c-d] x)
3865 for {
3866 c := v.AuxInt
3867 if v_0.Op != OpAMD64SHLLconst {
3868 break
3869 }
3870 d := v_0.AuxInt
3871 x := v_0.Args[0]
3872 if !(c > d) {
3873 break
3874 }
3875 v.reset(OpAMD64BTLconst)
3876 v.AuxInt = c - d
3877 v.AddArg(x)
3878 return true
3879 }
3880 // match: (BTLconst [0] s:(SHRL x y))
3881 // result: (BTL y x)
3882 for {
3883 if v.AuxInt != 0 {
3884 break
3885 }
3886 s := v_0
3887 if s.Op != OpAMD64SHRL {
3888 break
3889 }
3890 y := s.Args[1]
3891 x := s.Args[0]
3892 v.reset(OpAMD64BTL)
3893 v.AddArg2(y, x)
3894 return true
3895 }
3896 return false
3897 }
3898 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3899 v_0 := v.Args[0]
3900 // match: (BTQconst [c] (SHRQconst [d] x))
3901 // cond: (c+d)<64
3902 // result: (BTQconst [c+d] x)
3903 for {
3904 c := v.AuxInt
3905 if v_0.Op != OpAMD64SHRQconst {
3906 break
3907 }
3908 d := v_0.AuxInt
3909 x := v_0.Args[0]
3910 if !((c + d) < 64) {
3911 break
3912 }
3913 v.reset(OpAMD64BTQconst)
3914 v.AuxInt = c + d
3915 v.AddArg(x)
3916 return true
3917 }
3918 // match: (BTQconst [c] (SHLQconst [d] x))
3919 // cond: c>d
3920 // result: (BTQconst [c-d] x)
3921 for {
3922 c := v.AuxInt
3923 if v_0.Op != OpAMD64SHLQconst {
3924 break
3925 }
3926 d := v_0.AuxInt
3927 x := v_0.Args[0]
3928 if !(c > d) {
3929 break
3930 }
3931 v.reset(OpAMD64BTQconst)
3932 v.AuxInt = c - d
3933 v.AddArg(x)
3934 return true
3935 }
3936 // match: (BTQconst [0] s:(SHRQ x y))
3937 // result: (BTQ y x)
3938 for {
3939 if v.AuxInt != 0 {
3940 break
3941 }
3942 s := v_0
3943 if s.Op != OpAMD64SHRQ {
3944 break
3945 }
3946 y := s.Args[1]
3947 x := s.Args[0]
3948 v.reset(OpAMD64BTQ)
3949 v.AddArg2(y, x)
3950 return true
3951 }
3952 return false
3953 }
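// Consecutive bit ops on the same index collapse: whatever BTS or BTC
// left in bit c, a following BTR clears it, so only the BTR survives.
// Operations on distinct indices are merged into a single constant
// AND/OR/XOR mask instead.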
3954 func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
3955 v_0 := v.Args[0]
3956 // match: (BTRLconst [c] (BTSLconst [c] x))
3957 // result: (BTRLconst [c] x)
3958 for {
3959 c := v.AuxInt
3960 if v_0.Op != OpAMD64BTSLconst || v_0.AuxInt != c {
3961 break
3962 }
3963 x := v_0.Args[0]
3964 v.reset(OpAMD64BTRLconst)
3965 v.AuxInt = c
3966 v.AddArg(x)
3967 return true
3968 }
3969 // match: (BTRLconst [c] (BTCLconst [c] x))
3970 // result: (BTRLconst [c] x)
3971 for {
3972 c := v.AuxInt
3973 if v_0.Op != OpAMD64BTCLconst || v_0.AuxInt != c {
3974 break
3975 }
3976 x := v_0.Args[0]
3977 v.reset(OpAMD64BTRLconst)
3978 v.AuxInt = c
3979 v.AddArg(x)
3980 return true
3981 }
3982 // match: (BTRLconst [c] (ANDLconst [d] x))
3983 // result: (ANDLconst [d &^ (1 << uint32(c))] x)
3984 for {
3985 c := auxIntToInt8(v.AuxInt)
3986 if v_0.Op != OpAMD64ANDLconst {
3987 break
3988 }
3989 d := auxIntToInt32(v_0.AuxInt)
3990 x := v_0.Args[0]
3991 v.reset(OpAMD64ANDLconst)
3992 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
3993 v.AddArg(x)
3994 return true
3995 }
3996 // match: (BTRLconst [c] (BTRLconst [d] x))
3997 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
3998 for {
3999 c := auxIntToInt8(v.AuxInt)
4000 if v_0.Op != OpAMD64BTRLconst {
4001 break
4002 }
4003 d := auxIntToInt8(v_0.AuxInt)
4004 x := v_0.Args[0]
4005 v.reset(OpAMD64ANDLconst)
4006 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4007 v.AddArg(x)
4008 return true
4009 }
4010 // match: (BTRLconst [c] (MOVLconst [d]))
4011 // result: (MOVLconst [d &^ (1 << uint32(c))])
4012 for {
4013 c := auxIntToInt8(v.AuxInt)
4014 if v_0.Op != OpAMD64MOVLconst {
4015 break
4016 }
4017 d := auxIntToInt32(v_0.AuxInt)
4018 v.reset(OpAMD64MOVLconst)
4019 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4020 return true
4021 }
4022 return false
4023 }
4024 func rewriteValueAMD64_OpAMD64BTRLconstmodify(v *Value) bool {
4025 v_1 := v.Args[1]
4026 v_0 := v.Args[0]
4027 // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4028 // cond: ValAndOff(valoff1).canAdd(off2)
4029 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4030 for {
4031 valoff1 := v.AuxInt
4032 sym := v.Aux
4033 if v_0.Op != OpAMD64ADDQconst {
4034 break
4035 }
4036 off2 := v_0.AuxInt
4037 base := v_0.Args[0]
4038 mem := v_1
4039 if !(ValAndOff(valoff1).canAdd(off2)) {
4040 break
4041 }
4042 v.reset(OpAMD64BTRLconstmodify)
4043 v.AuxInt = ValAndOff(valoff1).add(off2)
4044 v.Aux = sym
4045 v.AddArg2(base, mem)
4046 return true
4047 }
4048 // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4049 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4050 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4051 for {
4052 valoff1 := v.AuxInt
4053 sym1 := v.Aux
4054 if v_0.Op != OpAMD64LEAQ {
4055 break
4056 }
4057 off2 := v_0.AuxInt
4058 sym2 := v_0.Aux
4059 base := v_0.Args[0]
4060 mem := v_1
4061 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4062 break
4063 }
4064 v.reset(OpAMD64BTRLconstmodify)
4065 v.AuxInt = ValAndOff(valoff1).add(off2)
4066 v.Aux = mergeSym(sym1, sym2)
4067 v.AddArg2(base, mem)
4068 return true
4069 }
4070 return false
4071 }
4072 func rewriteValueAMD64_OpAMD64BTRLmodify(v *Value) bool {
4073 v_2 := v.Args[2]
4074 v_1 := v.Args[1]
4075 v_0 := v.Args[0]
4076 // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4077 // cond: is32Bit(off1+off2)
4078 // result: (BTRLmodify [off1+off2] {sym} base val mem)
4079 for {
4080 off1 := v.AuxInt
4081 sym := v.Aux
4082 if v_0.Op != OpAMD64ADDQconst {
4083 break
4084 }
4085 off2 := v_0.AuxInt
4086 base := v_0.Args[0]
4087 val := v_1
4088 mem := v_2
4089 if !(is32Bit(off1 + off2)) {
4090 break
4091 }
4092 v.reset(OpAMD64BTRLmodify)
4093 v.AuxInt = off1 + off2
4094 v.Aux = sym
4095 v.AddArg3(base, val, mem)
4096 return true
4097 }
4098 // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4099 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4100 // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4101 for {
4102 off1 := v.AuxInt
4103 sym1 := v.Aux
4104 if v_0.Op != OpAMD64LEAQ {
4105 break
4106 }
4107 off2 := v_0.AuxInt
4108 sym2 := v_0.Aux
4109 base := v_0.Args[0]
4110 val := v_1
4111 mem := v_2
4112 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4113 break
4114 }
4115 v.reset(OpAMD64BTRLmodify)
4116 v.AuxInt = off1 + off2
4117 v.Aux = mergeSym(sym1, sym2)
4118 v.AddArg3(base, val, mem)
4119 return true
4120 }
4121 return false
4122 }
4123 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
4124 v_0 := v.Args[0]
4125 // match: (BTRQconst [c] (BTSQconst [c] x))
4126 // result: (BTRQconst [c] x)
4127 for {
4128 c := v.AuxInt
4129 if v_0.Op != OpAMD64BTSQconst || v_0.AuxInt != c {
4130 break
4131 }
4132 x := v_0.Args[0]
4133 v.reset(OpAMD64BTRQconst)
4134 v.AuxInt = c
4135 v.AddArg(x)
4136 return true
4137 }
4138 // match: (BTRQconst [c] (BTCQconst [c] x))
4139 // result: (BTRQconst [c] x)
4140 for {
4141 c := v.AuxInt
4142 if v_0.Op != OpAMD64BTCQconst || v_0.AuxInt != c {
4143 break
4144 }
4145 x := v_0.Args[0]
4146 v.reset(OpAMD64BTRQconst)
4147 v.AuxInt = c
4148 v.AddArg(x)
4149 return true
4150 }
4151 // match: (BTRQconst [c] (ANDQconst [d] x))
4152 // cond: is32Bit(int64(d) &^ (1 << uint32(c)))
4153 // result: (ANDQconst [d &^ (1 << uint32(c))] x)
4154 for {
4155 c := auxIntToInt8(v.AuxInt)
4156 if v_0.Op != OpAMD64ANDQconst {
4157 break
4158 }
4159 d := auxIntToInt32(v_0.AuxInt)
4160 x := v_0.Args[0]
4161 if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
4162 break
4163 }
4164 v.reset(OpAMD64ANDQconst)
4165 v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
4166 v.AddArg(x)
4167 return true
4168 }
4169 // match: (BTRQconst [c] (BTRQconst [d] x))
4170 // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
4171 // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
4172 for {
4173 c := auxIntToInt8(v.AuxInt)
4174 if v_0.Op != OpAMD64BTRQconst {
4175 break
4176 }
4177 d := auxIntToInt8(v_0.AuxInt)
4178 x := v_0.Args[0]
4179 if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
4180 break
4181 }
4182 v.reset(OpAMD64ANDQconst)
4183 v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
4184 v.AddArg(x)
4185 return true
4186 }
4187 // match: (BTRQconst [c] (MOVQconst [d]))
4188 // result: (MOVQconst [d &^ (1 << uint32(c))])
4189 for {
4190 c := v.AuxInt
4191 if v_0.Op != OpAMD64MOVQconst {
4192 break
4193 }
4194 d := v_0.AuxInt
4195 v.reset(OpAMD64MOVQconst)
4196 v.AuxInt = d &^ (1 << uint32(c))
4197 return true
4198 }
4199 return false
4200 }
4201 func rewriteValueAMD64_OpAMD64BTRQconstmodify(v *Value) bool {
4202 v_1 := v.Args[1]
4203 v_0 := v.Args[0]
4204 // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4205 // cond: ValAndOff(valoff1).canAdd(off2)
4206 // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4207 for {
4208 valoff1 := v.AuxInt
4209 sym := v.Aux
4210 if v_0.Op != OpAMD64ADDQconst {
4211 break
4212 }
4213 off2 := v_0.AuxInt
4214 base := v_0.Args[0]
4215 mem := v_1
4216 if !(ValAndOff(valoff1).canAdd(off2)) {
4217 break
4218 }
4219 v.reset(OpAMD64BTRQconstmodify)
4220 v.AuxInt = ValAndOff(valoff1).add(off2)
4221 v.Aux = sym
4222 v.AddArg2(base, mem)
4223 return true
4224 }
4225 // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4226 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4227 // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4228 for {
4229 valoff1 := v.AuxInt
4230 sym1 := v.Aux
4231 if v_0.Op != OpAMD64LEAQ {
4232 break
4233 }
4234 off2 := v_0.AuxInt
4235 sym2 := v_0.Aux
4236 base := v_0.Args[0]
4237 mem := v_1
4238 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4239 break
4240 }
4241 v.reset(OpAMD64BTRQconstmodify)
4242 v.AuxInt = ValAndOff(valoff1).add(off2)
4243 v.Aux = mergeSym(sym1, sym2)
4244 v.AddArg2(base, mem)
4245 return true
4246 }
4247 return false
4248 }
4249 func rewriteValueAMD64_OpAMD64BTRQmodify(v *Value) bool {
4250 v_2 := v.Args[2]
4251 v_1 := v.Args[1]
4252 v_0 := v.Args[0]
4253 // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4254 // cond: is32Bit(off1+off2)
4255 // result: (BTRQmodify [off1+off2] {sym} base val mem)
4256 for {
4257 off1 := v.AuxInt
4258 sym := v.Aux
4259 if v_0.Op != OpAMD64ADDQconst {
4260 break
4261 }
4262 off2 := v_0.AuxInt
4263 base := v_0.Args[0]
4264 val := v_1
4265 mem := v_2
4266 if !(is32Bit(off1 + off2)) {
4267 break
4268 }
4269 v.reset(OpAMD64BTRQmodify)
4270 v.AuxInt = off1 + off2
4271 v.Aux = sym
4272 v.AddArg3(base, val, mem)
4273 return true
4274 }
4275 // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4276 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4277 // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4278 for {
4279 off1 := v.AuxInt
4280 sym1 := v.Aux
4281 if v_0.Op != OpAMD64LEAQ {
4282 break
4283 }
4284 off2 := v_0.AuxInt
4285 sym2 := v_0.Aux
4286 base := v_0.Args[0]
4287 val := v_1
4288 mem := v_2
4289 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4290 break
4291 }
4292 v.reset(OpAMD64BTRQmodify)
4293 v.AuxInt = off1 + off2
4294 v.Aux = mergeSym(sym1, sym2)
4295 v.AddArg3(base, val, mem)
4296 return true
4297 }
4298 return false
4299 }
4300 func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
4301 v_0 := v.Args[0]
4302 // match: (BTSLconst [c] (BTRLconst [c] x))
4303 // result: (BTSLconst [c] x)
4304 for {
4305 c := v.AuxInt
4306 if v_0.Op != OpAMD64BTRLconst || v_0.AuxInt != c {
4307 break
4308 }
4309 x := v_0.Args[0]
4310 v.reset(OpAMD64BTSLconst)
4311 v.AuxInt = c
4312 v.AddArg(x)
4313 return true
4314 }
4315 // match: (BTSLconst [c] (BTCLconst [c] x))
4316 // result: (BTSLconst [c] x)
4317 for {
4318 c := v.AuxInt
4319 if v_0.Op != OpAMD64BTCLconst || v_0.AuxInt != c {
4320 break
4321 }
4322 x := v_0.Args[0]
4323 v.reset(OpAMD64BTSLconst)
4324 v.AuxInt = c
4325 v.AddArg(x)
4326 return true
4327 }
4328 // match: (BTSLconst [c] (ORLconst [d] x))
4329 // result: (ORLconst [d | 1<<uint32(c)] x)
4330 for {
4331 c := auxIntToInt8(v.AuxInt)
4332 if v_0.Op != OpAMD64ORLconst {
4333 break
4334 }
4335 d := auxIntToInt32(v_0.AuxInt)
4336 x := v_0.Args[0]
4337 v.reset(OpAMD64ORLconst)
4338 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4339 v.AddArg(x)
4340 return true
4341 }
4342 // match: (BTSLconst [c] (BTSLconst [d] x))
4343 // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
4344 for {
4345 c := auxIntToInt8(v.AuxInt)
4346 if v_0.Op != OpAMD64BTSLconst {
4347 break
4348 }
4349 d := auxIntToInt8(v_0.AuxInt)
4350 x := v_0.Args[0]
4351 v.reset(OpAMD64ORLconst)
4352 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4353 v.AddArg(x)
4354 return true
4355 }
4356 // match: (BTSLconst [c] (MOVLconst [d]))
4357 // result: (MOVLconst [d | (1 << uint32(c))])
4358 for {
4359 c := auxIntToInt8(v.AuxInt)
4360 if v_0.Op != OpAMD64MOVLconst {
4361 break
4362 }
4363 d := auxIntToInt32(v_0.AuxInt)
4364 v.reset(OpAMD64MOVLconst)
4365 v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
4366 return true
4367 }
4368 return false
4369 }
4370 func rewriteValueAMD64_OpAMD64BTSLconstmodify(v *Value) bool {
4371 v_1 := v.Args[1]
4372 v_0 := v.Args[0]
4373 // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4374 // cond: ValAndOff(valoff1).canAdd(off2)
4375 // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4376 for {
4377 valoff1 := v.AuxInt
4378 sym := v.Aux
4379 if v_0.Op != OpAMD64ADDQconst {
4380 break
4381 }
4382 off2 := v_0.AuxInt
4383 base := v_0.Args[0]
4384 mem := v_1
4385 if !(ValAndOff(valoff1).canAdd(off2)) {
4386 break
4387 }
4388 v.reset(OpAMD64BTSLconstmodify)
4389 v.AuxInt = ValAndOff(valoff1).add(off2)
4390 v.Aux = sym
4391 v.AddArg2(base, mem)
4392 return true
4393 }
4394 // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4395 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4396 // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4397 for {
4398 valoff1 := v.AuxInt
4399 sym1 := v.Aux
4400 if v_0.Op != OpAMD64LEAQ {
4401 break
4402 }
4403 off2 := v_0.AuxInt
4404 sym2 := v_0.Aux
4405 base := v_0.Args[0]
4406 mem := v_1
4407 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4408 break
4409 }
4410 v.reset(OpAMD64BTSLconstmodify)
4411 v.AuxInt = ValAndOff(valoff1).add(off2)
4412 v.Aux = mergeSym(sym1, sym2)
4413 v.AddArg2(base, mem)
4414 return true
4415 }
4416 return false
4417 }
4418 func rewriteValueAMD64_OpAMD64BTSLmodify(v *Value) bool {
4419 v_2 := v.Args[2]
4420 v_1 := v.Args[1]
4421 v_0 := v.Args[0]
4422 // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4423 // cond: is32Bit(off1+off2)
4424 // result: (BTSLmodify [off1+off2] {sym} base val mem)
4425 for {
4426 off1 := v.AuxInt
4427 sym := v.Aux
4428 if v_0.Op != OpAMD64ADDQconst {
4429 break
4430 }
4431 off2 := v_0.AuxInt
4432 base := v_0.Args[0]
4433 val := v_1
4434 mem := v_2
4435 if !(is32Bit(off1 + off2)) {
4436 break
4437 }
4438 v.reset(OpAMD64BTSLmodify)
4439 v.AuxInt = off1 + off2
4440 v.Aux = sym
4441 v.AddArg3(base, val, mem)
4442 return true
4443 }
4444 // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4445 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4446 // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4447 for {
4448 off1 := v.AuxInt
4449 sym1 := v.Aux
4450 if v_0.Op != OpAMD64LEAQ {
4451 break
4452 }
4453 off2 := v_0.AuxInt
4454 sym2 := v_0.Aux
4455 base := v_0.Args[0]
4456 val := v_1
4457 mem := v_2
4458 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4459 break
4460 }
4461 v.reset(OpAMD64BTSLmodify)
4462 v.AuxInt = off1 + off2
4463 v.Aux = mergeSym(sym1, sym2)
4464 v.AddArg3(base, val, mem)
4465 return true
4466 }
4467 return false
4468 }
4469 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
4470 v_0 := v.Args[0]
4471 // match: (BTSQconst [c] (BTRQconst [c] x))
4472 // result: (BTSQconst [c] x)
4473 for {
4474 c := v.AuxInt
4475 if v_0.Op != OpAMD64BTRQconst || v_0.AuxInt != c {
4476 break
4477 }
4478 x := v_0.Args[0]
4479 v.reset(OpAMD64BTSQconst)
4480 v.AuxInt = c
4481 v.AddArg(x)
4482 return true
4483 }
4484 // match: (BTSQconst [c] (BTCQconst [c] x))
4485 // result: (BTSQconst [c] x)
4486 for {
4487 c := v.AuxInt
4488 if v_0.Op != OpAMD64BTCQconst || v_0.AuxInt != c {
4489 break
4490 }
4491 x := v_0.Args[0]
4492 v.reset(OpAMD64BTSQconst)
4493 v.AuxInt = c
4494 v.AddArg(x)
4495 return true
4496 }
4497 // match: (BTSQconst [c] (ORQconst [d] x))
4498 // cond: is32Bit(int64(d) | 1<<uint32(c))
4499 // result: (ORQconst [d | 1<<uint32(c)] x)
4500 for {
4501 c := auxIntToInt8(v.AuxInt)
4502 if v_0.Op != OpAMD64ORQconst {
4503 break
4504 }
4505 d := auxIntToInt32(v_0.AuxInt)
4506 x := v_0.Args[0]
4507 if !(is32Bit(int64(d) | 1<<uint32(c))) {
4508 break
4509 }
4510 v.reset(OpAMD64ORQconst)
4511 v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
4512 v.AddArg(x)
4513 return true
4514 }
4515 // match: (BTSQconst [c] (BTSQconst [d] x))
4516 // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
4517 // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
4518 for {
4519 c := auxIntToInt8(v.AuxInt)
4520 if v_0.Op != OpAMD64BTSQconst {
4521 break
4522 }
4523 d := auxIntToInt8(v_0.AuxInt)
4524 x := v_0.Args[0]
4525 if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
4526 break
4527 }
4528 v.reset(OpAMD64ORQconst)
4529 v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
4530 v.AddArg(x)
4531 return true
4532 }
4533 // match: (BTSQconst [c] (MOVQconst [d]))
4534 // result: (MOVQconst [d | (1 << uint32(c))])
4535 for {
4536 c := v.AuxInt
4537 if v_0.Op != OpAMD64MOVQconst {
4538 break
4539 }
4540 d := v_0.AuxInt
4541 v.reset(OpAMD64MOVQconst)
4542 v.AuxInt = d | (1 << uint32(c))
4543 return true
4544 }
4545 return false
4546 }
4547 func rewriteValueAMD64_OpAMD64BTSQconstmodify(v *Value) bool {
4548 v_1 := v.Args[1]
4549 v_0 := v.Args[0]
4550 // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4551 // cond: ValAndOff(valoff1).canAdd(off2)
4552 // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4553 for {
4554 valoff1 := v.AuxInt
4555 sym := v.Aux
4556 if v_0.Op != OpAMD64ADDQconst {
4557 break
4558 }
4559 off2 := v_0.AuxInt
4560 base := v_0.Args[0]
4561 mem := v_1
4562 if !(ValAndOff(valoff1).canAdd(off2)) {
4563 break
4564 }
4565 v.reset(OpAMD64BTSQconstmodify)
4566 v.AuxInt = ValAndOff(valoff1).add(off2)
4567 v.Aux = sym
4568 v.AddArg2(base, mem)
4569 return true
4570 }
4571 // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4572 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4573 // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4574 for {
4575 valoff1 := v.AuxInt
4576 sym1 := v.Aux
4577 if v_0.Op != OpAMD64LEAQ {
4578 break
4579 }
4580 off2 := v_0.AuxInt
4581 sym2 := v_0.Aux
4582 base := v_0.Args[0]
4583 mem := v_1
4584 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4585 break
4586 }
4587 v.reset(OpAMD64BTSQconstmodify)
4588 v.AuxInt = ValAndOff(valoff1).add(off2)
4589 v.Aux = mergeSym(sym1, sym2)
4590 v.AddArg2(base, mem)
4591 return true
4592 }
4593 return false
4594 }
4595 func rewriteValueAMD64_OpAMD64BTSQmodify(v *Value) bool {
4596 v_2 := v.Args[2]
4597 v_1 := v.Args[1]
4598 v_0 := v.Args[0]
4599 // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4600 // cond: is32Bit(off1+off2)
4601 // result: (BTSQmodify [off1+off2] {sym} base val mem)
4602 for {
4603 off1 := v.AuxInt
4604 sym := v.Aux
4605 if v_0.Op != OpAMD64ADDQconst {
4606 break
4607 }
4608 off2 := v_0.AuxInt
4609 base := v_0.Args[0]
4610 val := v_1
4611 mem := v_2
4612 if !(is32Bit(off1 + off2)) {
4613 break
4614 }
4615 v.reset(OpAMD64BTSQmodify)
4616 v.AuxInt = off1 + off2
4617 v.Aux = sym
4618 v.AddArg3(base, val, mem)
4619 return true
4620 }
4621 // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4622 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4623 // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4624 for {
4625 off1 := v.AuxInt
4626 sym1 := v.Aux
4627 if v_0.Op != OpAMD64LEAQ {
4628 break
4629 }
4630 off2 := v_0.AuxInt
4631 sym2 := v_0.Aux
4632 base := v_0.Args[0]
4633 val := v_1
4634 mem := v_2
4635 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4636 break
4637 }
4638 v.reset(OpAMD64BTSQmodify)
4639 v.AuxInt = off1 + off2
4640 v.Aux = mergeSym(sym1, sym2)
4641 v.AddArg3(base, val, mem)
4642 return true
4643 }
4644 return false
4645 }
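// The CMOV rules follow a common scheme. InvertFlags marks a comparison
// whose operands were swapped, so the condition is replaced by its dual
// (CC <-> LS, CS <-> HI, GE <-> LE, GT <-> LT; EQ and NE are self-dual).
// When the flags are a known constant (FlagEQ, FlagGT_UGT, ...), the
// conditional move resolves statically to one of its two data operands.
// For example, (CMOVLCC x y (InvertFlags c)) = (CMOVLLS x y c): "above or
// equal" of the swapped comparison is "below or equal" of the original.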
4646 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
4647 v_2 := v.Args[2]
4648 v_1 := v.Args[1]
4649 v_0 := v.Args[0]
4650 // match: (CMOVLCC x y (InvertFlags cond))
4651 // result: (CMOVLLS x y cond)
4652 for {
4653 x := v_0
4654 y := v_1
4655 if v_2.Op != OpAMD64InvertFlags {
4656 break
4657 }
4658 cond := v_2.Args[0]
4659 v.reset(OpAMD64CMOVLLS)
4660 v.AddArg3(x, y, cond)
4661 return true
4662 }
4663 // match: (CMOVLCC _ x (FlagEQ))
4664 // result: x
4665 for {
4666 x := v_1
4667 if v_2.Op != OpAMD64FlagEQ {
4668 break
4669 }
4670 v.copyOf(x)
4671 return true
4672 }
4673 // match: (CMOVLCC _ x (FlagGT_UGT))
4674 // result: x
4675 for {
4676 x := v_1
4677 if v_2.Op != OpAMD64FlagGT_UGT {
4678 break
4679 }
4680 v.copyOf(x)
4681 return true
4682 }
4683 // match: (CMOVLCC y _ (FlagGT_ULT))
4684 // result: y
4685 for {
4686 y := v_0
4687 if v_2.Op != OpAMD64FlagGT_ULT {
4688 break
4689 }
4690 v.copyOf(y)
4691 return true
4692 }
4693 // match: (CMOVLCC y _ (FlagLT_ULT))
4694 // result: y
4695 for {
4696 y := v_0
4697 if v_2.Op != OpAMD64FlagLT_ULT {
4698 break
4699 }
4700 v.copyOf(y)
4701 return true
4702 }
4703 // match: (CMOVLCC _ x (FlagLT_UGT))
4704 // result: x
4705 for {
4706 x := v_1
4707 if v_2.Op != OpAMD64FlagLT_UGT {
4708 break
4709 }
4710 v.copyOf(x)
4711 return true
4712 }
4713 return false
4714 }
4715 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4716 v_2 := v.Args[2]
4717 v_1 := v.Args[1]
4718 v_0 := v.Args[0]
4719 // match: (CMOVLCS x y (InvertFlags cond))
4720 // result: (CMOVLHI x y cond)
4721 for {
4722 x := v_0
4723 y := v_1
4724 if v_2.Op != OpAMD64InvertFlags {
4725 break
4726 }
4727 cond := v_2.Args[0]
4728 v.reset(OpAMD64CMOVLHI)
4729 v.AddArg3(x, y, cond)
4730 return true
4731 }
4732 // match: (CMOVLCS y _ (FlagEQ))
4733 // result: y
4734 for {
4735 y := v_0
4736 if v_2.Op != OpAMD64FlagEQ {
4737 break
4738 }
4739 v.copyOf(y)
4740 return true
4741 }
4742 // match: (CMOVLCS y _ (FlagGT_UGT))
4743 // result: y
4744 for {
4745 y := v_0
4746 if v_2.Op != OpAMD64FlagGT_UGT {
4747 break
4748 }
4749 v.copyOf(y)
4750 return true
4751 }
4752 // match: (CMOVLCS _ x (FlagGT_ULT))
4753 // result: x
4754 for {
4755 x := v_1
4756 if v_2.Op != OpAMD64FlagGT_ULT {
4757 break
4758 }
4759 v.copyOf(x)
4760 return true
4761 }
4762 // match: (CMOVLCS _ x (FlagLT_ULT))
4763 // result: x
4764 for {
4765 x := v_1
4766 if v_2.Op != OpAMD64FlagLT_ULT {
4767 break
4768 }
4769 v.copyOf(x)
4770 return true
4771 }
4772 // match: (CMOVLCS y _ (FlagLT_UGT))
4773 // result: y
4774 for {
4775 y := v_0
4776 if v_2.Op != OpAMD64FlagLT_UGT {
4777 break
4778 }
4779 v.copyOf(y)
4780 return true
4781 }
4782 return false
4783 }
4784 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4785 v_2 := v.Args[2]
4786 v_1 := v.Args[1]
4787 v_0 := v.Args[0]
4788 // match: (CMOVLEQ x y (InvertFlags cond))
4789 // result: (CMOVLEQ x y cond)
4790 for {
4791 x := v_0
4792 y := v_1
4793 if v_2.Op != OpAMD64InvertFlags {
4794 break
4795 }
4796 cond := v_2.Args[0]
4797 v.reset(OpAMD64CMOVLEQ)
4798 v.AddArg3(x, y, cond)
4799 return true
4800 }
4801 // match: (CMOVLEQ _ x (FlagEQ))
4802 // result: x
4803 for {
4804 x := v_1
4805 if v_2.Op != OpAMD64FlagEQ {
4806 break
4807 }
4808 v.copyOf(x)
4809 return true
4810 }
4811 // match: (CMOVLEQ y _ (FlagGT_UGT))
4812 // result: y
4813 for {
4814 y := v_0
4815 if v_2.Op != OpAMD64FlagGT_UGT {
4816 break
4817 }
4818 v.copyOf(y)
4819 return true
4820 }
4821 // match: (CMOVLEQ y _ (FlagGT_ULT))
4822 // result: y
4823 for {
4824 y := v_0
4825 if v_2.Op != OpAMD64FlagGT_ULT {
4826 break
4827 }
4828 v.copyOf(y)
4829 return true
4830 }
4831 // match: (CMOVLEQ y _ (FlagLT_ULT))
4832 // result: y
4833 for {
4834 y := v_0
4835 if v_2.Op != OpAMD64FlagLT_ULT {
4836 break
4837 }
4838 v.copyOf(y)
4839 return true
4840 }
4841 // match: (CMOVLEQ y _ (FlagLT_UGT))
4842 // result: y
4843 for {
4844 y := v_0
4845 if v_2.Op != OpAMD64FlagLT_UGT {
4846 break
4847 }
4848 v.copyOf(y)
4849 return true
4850 }
4851 return false
4852 }
4853 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4854 v_2 := v.Args[2]
4855 v_1 := v.Args[1]
4856 v_0 := v.Args[0]
4857 // match: (CMOVLGE x y (InvertFlags cond))
4858 // result: (CMOVLLE x y cond)
4859 for {
4860 x := v_0
4861 y := v_1
4862 if v_2.Op != OpAMD64InvertFlags {
4863 break
4864 }
4865 cond := v_2.Args[0]
4866 v.reset(OpAMD64CMOVLLE)
4867 v.AddArg3(x, y, cond)
4868 return true
4869 }
4870 // match: (CMOVLGE _ x (FlagEQ))
4871 // result: x
4872 for {
4873 x := v_1
4874 if v_2.Op != OpAMD64FlagEQ {
4875 break
4876 }
4877 v.copyOf(x)
4878 return true
4879 }
4880 // match: (CMOVLGE _ x (FlagGT_UGT))
4881 // result: x
4882 for {
4883 x := v_1
4884 if v_2.Op != OpAMD64FlagGT_UGT {
4885 break
4886 }
4887 v.copyOf(x)
4888 return true
4889 }
4890 // match: (CMOVLGE _ x (FlagGT_ULT))
4891 // result: x
4892 for {
4893 x := v_1
4894 if v_2.Op != OpAMD64FlagGT_ULT {
4895 break
4896 }
4897 v.copyOf(x)
4898 return true
4899 }
4900 // match: (CMOVLGE y _ (FlagLT_ULT))
4901 // result: y
4902 for {
4903 y := v_0
4904 if v_2.Op != OpAMD64FlagLT_ULT {
4905 break
4906 }
4907 v.copyOf(y)
4908 return true
4909 }
4910 // match: (CMOVLGE y _ (FlagLT_UGT))
4911 // result: y
4912 for {
4913 y := v_0
4914 if v_2.Op != OpAMD64FlagLT_UGT {
4915 break
4916 }
4917 v.copyOf(y)
4918 return true
4919 }
4920 return false
4921 }
4922 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4923 v_2 := v.Args[2]
4924 v_1 := v.Args[1]
4925 v_0 := v.Args[0]
4926 // match: (CMOVLGT x y (InvertFlags cond))
4927 // result: (CMOVLLT x y cond)
4928 for {
4929 x := v_0
4930 y := v_1
4931 if v_2.Op != OpAMD64InvertFlags {
4932 break
4933 }
4934 cond := v_2.Args[0]
4935 v.reset(OpAMD64CMOVLLT)
4936 v.AddArg3(x, y, cond)
4937 return true
4938 }
4939 // match: (CMOVLGT y _ (FlagEQ))
4940 // result: y
4941 for {
4942 y := v_0
4943 if v_2.Op != OpAMD64FlagEQ {
4944 break
4945 }
4946 v.copyOf(y)
4947 return true
4948 }
4949 // match: (CMOVLGT _ x (FlagGT_UGT))
4950 // result: x
4951 for {
4952 x := v_1
4953 if v_2.Op != OpAMD64FlagGT_UGT {
4954 break
4955 }
4956 v.copyOf(x)
4957 return true
4958 }
4959 // match: (CMOVLGT _ x (FlagGT_ULT))
4960 // result: x
4961 for {
4962 x := v_1
4963 if v_2.Op != OpAMD64FlagGT_ULT {
4964 break
4965 }
4966 v.copyOf(x)
4967 return true
4968 }
4969 // match: (CMOVLGT y _ (FlagLT_ULT))
4970 // result: y
4971 for {
4972 y := v_0
4973 if v_2.Op != OpAMD64FlagLT_ULT {
4974 break
4975 }
4976 v.copyOf(y)
4977 return true
4978 }
4979 // match: (CMOVLGT y _ (FlagLT_UGT))
4980 // result: y
4981 for {
4982 y := v_0
4983 if v_2.Op != OpAMD64FlagLT_UGT {
4984 break
4985 }
4986 v.copyOf(y)
4987 return true
4988 }
4989 return false
4990 }
4991 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4992 v_2 := v.Args[2]
4993 v_1 := v.Args[1]
4994 v_0 := v.Args[0]
4995 // match: (CMOVLHI x y (InvertFlags cond))
4996 // result: (CMOVLCS x y cond)
4997 for {
4998 x := v_0
4999 y := v_1
5000 if v_2.Op != OpAMD64InvertFlags {
5001 break
5002 }
5003 cond := v_2.Args[0]
5004 v.reset(OpAMD64CMOVLCS)
5005 v.AddArg3(x, y, cond)
5006 return true
5007 }
5008 // match: (CMOVLHI y _ (FlagEQ))
5009 // result: y
5010 for {
5011 y := v_0
5012 if v_2.Op != OpAMD64FlagEQ {
5013 break
5014 }
5015 v.copyOf(y)
5016 return true
5017 }
5018 // match: (CMOVLHI _ x (FlagGT_UGT))
5019 // result: x
5020 for {
5021 x := v_1
5022 if v_2.Op != OpAMD64FlagGT_UGT {
5023 break
5024 }
5025 v.copyOf(x)
5026 return true
5027 }
5028 // match: (CMOVLHI y _ (FlagGT_ULT))
5029 // result: y
5030 for {
5031 y := v_0
5032 if v_2.Op != OpAMD64FlagGT_ULT {
5033 break
5034 }
5035 v.copyOf(y)
5036 return true
5037 }
5038 // match: (CMOVLHI y _ (FlagLT_ULT))
5039 // result: y
5040 for {
5041 y := v_0
5042 if v_2.Op != OpAMD64FlagLT_ULT {
5043 break
5044 }
5045 v.copyOf(y)
5046 return true
5047 }
5048 // match: (CMOVLHI _ x (FlagLT_UGT))
5049 // result: x
5050 for {
5051 x := v_1
5052 if v_2.Op != OpAMD64FlagLT_UGT {
5053 break
5054 }
5055 v.copyOf(x)
5056 return true
5057 }
5058 return false
5059 }
5060 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
5061 v_2 := v.Args[2]
5062 v_1 := v.Args[1]
5063 v_0 := v.Args[0]
5064 // match: (CMOVLLE x y (InvertFlags cond))
5065 // result: (CMOVLGE x y cond)
5066 for {
5067 x := v_0
5068 y := v_1
5069 if v_2.Op != OpAMD64InvertFlags {
5070 break
5071 }
5072 cond := v_2.Args[0]
5073 v.reset(OpAMD64CMOVLGE)
5074 v.AddArg3(x, y, cond)
5075 return true
5076 }
5077 // match: (CMOVLLE _ x (FlagEQ))
5078 // result: x
5079 for {
5080 x := v_1
5081 if v_2.Op != OpAMD64FlagEQ {
5082 break
5083 }
5084 v.copyOf(x)
5085 return true
5086 }
5087 // match: (CMOVLLE y _ (FlagGT_UGT))
5088 // result: y
5089 for {
5090 y := v_0
5091 if v_2.Op != OpAMD64FlagGT_UGT {
5092 break
5093 }
5094 v.copyOf(y)
5095 return true
5096 }
5097 // match: (CMOVLLE y _ (FlagGT_ULT))
5098 // result: y
5099 for {
5100 y := v_0
5101 if v_2.Op != OpAMD64FlagGT_ULT {
5102 break
5103 }
5104 v.copyOf(y)
5105 return true
5106 }
5107 // match: (CMOVLLE _ x (FlagLT_ULT))
5108 // result: x
5109 for {
5110 x := v_1
5111 if v_2.Op != OpAMD64FlagLT_ULT {
5112 break
5113 }
5114 v.copyOf(x)
5115 return true
5116 }
5117 // match: (CMOVLLE _ x (FlagLT_UGT))
5118 // result: x
5119 for {
5120 x := v_1
5121 if v_2.Op != OpAMD64FlagLT_UGT {
5122 break
5123 }
5124 v.copyOf(x)
5125 return true
5126 }
5127 return false
5128 }
5129 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
5130 v_2 := v.Args[2]
5131 v_1 := v.Args[1]
5132 v_0 := v.Args[0]
5133 // match: (CMOVLLS x y (InvertFlags cond))
5134 // result: (CMOVLCC x y cond)
5135 for {
5136 x := v_0
5137 y := v_1
5138 if v_2.Op != OpAMD64InvertFlags {
5139 break
5140 }
5141 cond := v_2.Args[0]
5142 v.reset(OpAMD64CMOVLCC)
5143 v.AddArg3(x, y, cond)
5144 return true
5145 }
5146 // match: (CMOVLLS _ x (FlagEQ))
5147 // result: x
5148 for {
5149 x := v_1
5150 if v_2.Op != OpAMD64FlagEQ {
5151 break
5152 }
5153 v.copyOf(x)
5154 return true
5155 }
5156 // match: (CMOVLLS y _ (FlagGT_UGT))
5157 // result: y
5158 for {
5159 y := v_0
5160 if v_2.Op != OpAMD64FlagGT_UGT {
5161 break
5162 }
5163 v.copyOf(y)
5164 return true
5165 }
5166 // match: (CMOVLLS _ x (FlagGT_ULT))
5167 // result: x
5168 for {
5169 x := v_1
5170 if v_2.Op != OpAMD64FlagGT_ULT {
5171 break
5172 }
5173 v.copyOf(x)
5174 return true
5175 }
5176 // match: (CMOVLLS _ x (FlagLT_ULT))
5177 // result: x
5178 for {
5179 x := v_1
5180 if v_2.Op != OpAMD64FlagLT_ULT {
5181 break
5182 }
5183 v.copyOf(x)
5184 return true
5185 }
5186 // match: (CMOVLLS y _ (FlagLT_UGT))
5187 // result: y
5188 for {
5189 y := v_0
5190 if v_2.Op != OpAMD64FlagLT_UGT {
5191 break
5192 }
5193 v.copyOf(y)
5194 return true
5195 }
5196 return false
5197 }
5198 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
5199 v_2 := v.Args[2]
5200 v_1 := v.Args[1]
5201 v_0 := v.Args[0]
5202 // match: (CMOVLLT x y (InvertFlags cond))
5203 // result: (CMOVLGT x y cond)
5204 for {
5205 x := v_0
5206 y := v_1
5207 if v_2.Op != OpAMD64InvertFlags {
5208 break
5209 }
5210 cond := v_2.Args[0]
5211 v.reset(OpAMD64CMOVLGT)
5212 v.AddArg3(x, y, cond)
5213 return true
5214 }
5215 // match: (CMOVLLT y _ (FlagEQ))
5216 // result: y
5217 for {
5218 y := v_0
5219 if v_2.Op != OpAMD64FlagEQ {
5220 break
5221 }
5222 v.copyOf(y)
5223 return true
5224 }
5225 // match: (CMOVLLT y _ (FlagGT_UGT))
5226 // result: y
5227 for {
5228 y := v_0
5229 if v_2.Op != OpAMD64FlagGT_UGT {
5230 break
5231 }
5232 v.copyOf(y)
5233 return true
5234 }
5235 // match: (CMOVLLT y _ (FlagGT_ULT))
5236 // result: y
5237 for {
5238 y := v_0
5239 if v_2.Op != OpAMD64FlagGT_ULT {
5240 break
5241 }
5242 v.copyOf(y)
5243 return true
5244 }
5245 // match: (CMOVLLT _ x (FlagLT_ULT))
5246 // result: x
5247 for {
5248 x := v_1
5249 if v_2.Op != OpAMD64FlagLT_ULT {
5250 break
5251 }
5252 v.copyOf(x)
5253 return true
5254 }
5255 // match: (CMOVLLT _ x (FlagLT_UGT))
5256 // result: x
5257 for {
5258 x := v_1
5259 if v_2.Op != OpAMD64FlagLT_UGT {
5260 break
5261 }
5262 v.copyOf(x)
5263 return true
5264 }
5265 return false
5266 }
5267 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
5268 v_2 := v.Args[2]
5269 v_1 := v.Args[1]
5270 v_0 := v.Args[0]
5271 // match: (CMOVLNE x y (InvertFlags cond))
5272 // result: (CMOVLNE x y cond)
5273 for {
5274 x := v_0
5275 y := v_1
5276 if v_2.Op != OpAMD64InvertFlags {
5277 break
5278 }
5279 cond := v_2.Args[0]
5280 v.reset(OpAMD64CMOVLNE)
5281 v.AddArg3(x, y, cond)
5282 return true
5283 }
5284 // match: (CMOVLNE y _ (FlagEQ))
5285 // result: y
5286 for {
5287 y := v_0
5288 if v_2.Op != OpAMD64FlagEQ {
5289 break
5290 }
5291 v.copyOf(y)
5292 return true
5293 }
5294 // match: (CMOVLNE _ x (FlagGT_UGT))
5295 // result: x
5296 for {
5297 x := v_1
5298 if v_2.Op != OpAMD64FlagGT_UGT {
5299 break
5300 }
5301 v.copyOf(x)
5302 return true
5303 }
5304 // match: (CMOVLNE _ x (FlagGT_ULT))
5305 // result: x
5306 for {
5307 x := v_1
5308 if v_2.Op != OpAMD64FlagGT_ULT {
5309 break
5310 }
5311 v.copyOf(x)
5312 return true
5313 }
5314 // match: (CMOVLNE _ x (FlagLT_ULT))
5315 // result: x
5316 for {
5317 x := v_1
5318 if v_2.Op != OpAMD64FlagLT_ULT {
5319 break
5320 }
5321 v.copyOf(x)
5322 return true
5323 }
5324 // match: (CMOVLNE _ x (FlagLT_UGT))
5325 // result: x
5326 for {
5327 x := v_1
5328 if v_2.Op != OpAMD64FlagLT_UGT {
5329 break
5330 }
5331 v.copyOf(x)
5332 return true
5333 }
5334 return false
5335 }
5336 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
5337 v_2 := v.Args[2]
5338 v_1 := v.Args[1]
5339 v_0 := v.Args[0]
5340 // match: (CMOVQCC x y (InvertFlags cond))
5341 // result: (CMOVQLS x y cond)
5342 for {
5343 x := v_0
5344 y := v_1
5345 if v_2.Op != OpAMD64InvertFlags {
5346 break
5347 }
5348 cond := v_2.Args[0]
5349 v.reset(OpAMD64CMOVQLS)
5350 v.AddArg3(x, y, cond)
5351 return true
5352 }
5353 // match: (CMOVQCC _ x (FlagEQ))
5354 // result: x
5355 for {
5356 x := v_1
5357 if v_2.Op != OpAMD64FlagEQ {
5358 break
5359 }
5360 v.copyOf(x)
5361 return true
5362 }
5363 // match: (CMOVQCC _ x (FlagGT_UGT))
5364 // result: x
5365 for {
5366 x := v_1
5367 if v_2.Op != OpAMD64FlagGT_UGT {
5368 break
5369 }
5370 v.copyOf(x)
5371 return true
5372 }
5373 // match: (CMOVQCC y _ (FlagGT_ULT))
5374 // result: y
5375 for {
5376 y := v_0
5377 if v_2.Op != OpAMD64FlagGT_ULT {
5378 break
5379 }
5380 v.copyOf(y)
5381 return true
5382 }
5383 // match: (CMOVQCC y _ (FlagLT_ULT))
5384 // result: y
5385 for {
5386 y := v_0
5387 if v_2.Op != OpAMD64FlagLT_ULT {
5388 break
5389 }
5390 v.copyOf(y)
5391 return true
5392 }
5393 // match: (CMOVQCC _ x (FlagLT_UGT))
5394 // result: x
5395 for {
5396 x := v_1
5397 if v_2.Op != OpAMD64FlagLT_UGT {
5398 break
5399 }
5400 v.copyOf(x)
5401 return true
5402 }
5403 return false
5404 }
5405 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
5406 v_2 := v.Args[2]
5407 v_1 := v.Args[1]
5408 v_0 := v.Args[0]
5409 // match: (CMOVQCS x y (InvertFlags cond))
5410 // result: (CMOVQHI x y cond)
5411 for {
5412 x := v_0
5413 y := v_1
5414 if v_2.Op != OpAMD64InvertFlags {
5415 break
5416 }
5417 cond := v_2.Args[0]
5418 v.reset(OpAMD64CMOVQHI)
5419 v.AddArg3(x, y, cond)
5420 return true
5421 }
5422 // match: (CMOVQCS y _ (FlagEQ))
5423 // result: y
5424 for {
5425 y := v_0
5426 if v_2.Op != OpAMD64FlagEQ {
5427 break
5428 }
5429 v.copyOf(y)
5430 return true
5431 }
5432 // match: (CMOVQCS y _ (FlagGT_UGT))
5433 // result: y
5434 for {
5435 y := v_0
5436 if v_2.Op != OpAMD64FlagGT_UGT {
5437 break
5438 }
5439 v.copyOf(y)
5440 return true
5441 }
5442 // match: (CMOVQCS _ x (FlagGT_ULT))
5443 // result: x
5444 for {
5445 x := v_1
5446 if v_2.Op != OpAMD64FlagGT_ULT {
5447 break
5448 }
5449 v.copyOf(x)
5450 return true
5451 }
5452 // match: (CMOVQCS _ x (FlagLT_ULT))
5453 // result: x
5454 for {
5455 x := v_1
5456 if v_2.Op != OpAMD64FlagLT_ULT {
5457 break
5458 }
5459 v.copyOf(x)
5460 return true
5461 }
5462 // match: (CMOVQCS y _ (FlagLT_UGT))
5463 // result: y
5464 for {
5465 y := v_0
5466 if v_2.Op != OpAMD64FlagLT_UGT {
5467 break
5468 }
5469 v.copyOf(y)
5470 return true
5471 }
5472 return false
5473 }
5474 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5475 v_2 := v.Args[2]
5476 v_1 := v.Args[1]
5477 v_0 := v.Args[0]
5478 // match: (CMOVQEQ x y (InvertFlags cond))
5479 // result: (CMOVQEQ x y cond)
5480 for {
5481 x := v_0
5482 y := v_1
5483 if v_2.Op != OpAMD64InvertFlags {
5484 break
5485 }
5486 cond := v_2.Args[0]
5487 v.reset(OpAMD64CMOVQEQ)
5488 v.AddArg3(x, y, cond)
5489 return true
5490 }
5491 // match: (CMOVQEQ _ x (FlagEQ))
5492 // result: x
5493 for {
5494 x := v_1
5495 if v_2.Op != OpAMD64FlagEQ {
5496 break
5497 }
5498 v.copyOf(x)
5499 return true
5500 }
5501 // match: (CMOVQEQ y _ (FlagGT_UGT))
5502 // result: y
5503 for {
5504 y := v_0
5505 if v_2.Op != OpAMD64FlagGT_UGT {
5506 break
5507 }
5508 v.copyOf(y)
5509 return true
5510 }
5511
5512
5513 for {
5514 y := v_0
5515 if v_2.Op != OpAMD64FlagGT_ULT {
5516 break
5517 }
5518 v.copyOf(y)
5519 return true
5520 }
5521
5522
5523 for {
5524 y := v_0
5525 if v_2.Op != OpAMD64FlagLT_ULT {
5526 break
5527 }
5528 v.copyOf(y)
5529 return true
5530 }
5531
5532
5533 for {
5534 y := v_0
5535 if v_2.Op != OpAMD64FlagLT_UGT {
5536 break
5537 }
5538 v.copyOf(y)
5539 return true
5540 }
5541
5542
5543
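// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
// cond: c != 0
// result: x
// ORing in a nonzero constant makes the BSFQ input nonzero, so ZF is
// never set and the EQ arm can never be chosen.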
5544 for {
5545 x := v_0
5546 if v_2.Op != OpSelect1 {
5547 break
5548 }
5549 v_2_0 := v_2.Args[0]
5550 if v_2_0.Op != OpAMD64BSFQ {
5551 break
5552 }
5553 v_2_0_0 := v_2_0.Args[0]
5554 if v_2_0_0.Op != OpAMD64ORQconst {
5555 break
5556 }
5557 c := v_2_0_0.AuxInt
5558 if !(c != 0) {
5559 break
5560 }
5561 v.copyOf(x)
5562 return true
5563 }
5564 return false
5565 }
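// rewriteValueAMD64_OpAMD64CMOVQGE rewrites CMOVQGE (move if signed >=):
// (CMOVQGE x y (InvertFlags cond)) -> (CMOVQLE x y cond); a constant
// Flag* condition selects the x or y operand outright.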
5566 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5567 v_2 := v.Args[2]
5568 v_1 := v.Args[1]
5569 v_0 := v.Args[0]
5570
5571
5572 for {
5573 x := v_0
5574 y := v_1
5575 if v_2.Op != OpAMD64InvertFlags {
5576 break
5577 }
5578 cond := v_2.Args[0]
5579 v.reset(OpAMD64CMOVQLE)
5580 v.AddArg3(x, y, cond)
5581 return true
5582 }
5583
5584
5585 for {
5586 x := v_1
5587 if v_2.Op != OpAMD64FlagEQ {
5588 break
5589 }
5590 v.copyOf(x)
5591 return true
5592 }
5593
5594
5595 for {
5596 x := v_1
5597 if v_2.Op != OpAMD64FlagGT_UGT {
5598 break
5599 }
5600 v.copyOf(x)
5601 return true
5602 }
5603
5604
5605 for {
5606 x := v_1
5607 if v_2.Op != OpAMD64FlagGT_ULT {
5608 break
5609 }
5610 v.copyOf(x)
5611 return true
5612 }
5613
5614
5615 for {
5616 y := v_0
5617 if v_2.Op != OpAMD64FlagLT_ULT {
5618 break
5619 }
5620 v.copyOf(y)
5621 return true
5622 }
5623
5624
5625 for {
5626 y := v_0
5627 if v_2.Op != OpAMD64FlagLT_UGT {
5628 break
5629 }
5630 v.copyOf(y)
5631 return true
5632 }
5633 return false
5634 }
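// rewriteValueAMD64_OpAMD64CMOVQGT rewrites CMOVQGT (move if signed >):
// (CMOVQGT x y (InvertFlags cond)) -> (CMOVQLT x y cond); a constant
// Flag* condition selects the x or y operand outright.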
5635 func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
5636 v_2 := v.Args[2]
5637 v_1 := v.Args[1]
5638 v_0 := v.Args[0]
5639
5640
5641 for {
5642 x := v_0
5643 y := v_1
5644 if v_2.Op != OpAMD64InvertFlags {
5645 break
5646 }
5647 cond := v_2.Args[0]
5648 v.reset(OpAMD64CMOVQLT)
5649 v.AddArg3(x, y, cond)
5650 return true
5651 }
5652
5653
5654 for {
5655 y := v_0
5656 if v_2.Op != OpAMD64FlagEQ {
5657 break
5658 }
5659 v.copyOf(y)
5660 return true
5661 }
5662
5663
5664 for {
5665 x := v_1
5666 if v_2.Op != OpAMD64FlagGT_UGT {
5667 break
5668 }
5669 v.copyOf(x)
5670 return true
5671 }
5672
5673
5674 for {
5675 x := v_1
5676 if v_2.Op != OpAMD64FlagGT_ULT {
5677 break
5678 }
5679 v.copyOf(x)
5680 return true
5681 }
5682
5683
5684 for {
5685 y := v_0
5686 if v_2.Op != OpAMD64FlagLT_ULT {
5687 break
5688 }
5689 v.copyOf(y)
5690 return true
5691 }
5692
5693
5694 for {
5695 y := v_0
5696 if v_2.Op != OpAMD64FlagLT_UGT {
5697 break
5698 }
5699 v.copyOf(y)
5700 return true
5701 }
5702 return false
5703 }
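// rewriteValueAMD64_OpAMD64CMOVQHI rewrites CMOVQHI (move if unsigned >):
// (CMOVQHI x y (InvertFlags cond)) -> (CMOVQCS x y cond); a constant
// Flag* condition selects the x or y operand outright.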
5704 func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
5705 v_2 := v.Args[2]
5706 v_1 := v.Args[1]
5707 v_0 := v.Args[0]
5708
5709
5710 for {
5711 x := v_0
5712 y := v_1
5713 if v_2.Op != OpAMD64InvertFlags {
5714 break
5715 }
5716 cond := v_2.Args[0]
5717 v.reset(OpAMD64CMOVQCS)
5718 v.AddArg3(x, y, cond)
5719 return true
5720 }
5721
5722
5723 for {
5724 y := v_0
5725 if v_2.Op != OpAMD64FlagEQ {
5726 break
5727 }
5728 v.copyOf(y)
5729 return true
5730 }
5731
5732
5733 for {
5734 x := v_1
5735 if v_2.Op != OpAMD64FlagGT_UGT {
5736 break
5737 }
5738 v.copyOf(x)
5739 return true
5740 }
5741
5742
5743 for {
5744 y := v_0
5745 if v_2.Op != OpAMD64FlagGT_ULT {
5746 break
5747 }
5748 v.copyOf(y)
5749 return true
5750 }
5751
5752
5753 for {
5754 y := v_0
5755 if v_2.Op != OpAMD64FlagLT_ULT {
5756 break
5757 }
5758 v.copyOf(y)
5759 return true
5760 }
5761
5762
5763 for {
5764 x := v_1
5765 if v_2.Op != OpAMD64FlagLT_UGT {
5766 break
5767 }
5768 v.copyOf(x)
5769 return true
5770 }
5771 return false
5772 }
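// rewriteValueAMD64_OpAMD64CMOVQLE rewrites CMOVQLE (move if signed <=):
// (CMOVQLE x y (InvertFlags cond)) -> (CMOVQGE x y cond); a constant
// Flag* condition selects the x or y operand outright.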
5773 func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
5774 v_2 := v.Args[2]
5775 v_1 := v.Args[1]
5776 v_0 := v.Args[0]
5777
5778
5779 for {
5780 x := v_0
5781 y := v_1
5782 if v_2.Op != OpAMD64InvertFlags {
5783 break
5784 }
5785 cond := v_2.Args[0]
5786 v.reset(OpAMD64CMOVQGE)
5787 v.AddArg3(x, y, cond)
5788 return true
5789 }
5790
5791
5792 for {
5793 x := v_1
5794 if v_2.Op != OpAMD64FlagEQ {
5795 break
5796 }
5797 v.copyOf(x)
5798 return true
5799 }
5800
5801
5802 for {
5803 y := v_0
5804 if v_2.Op != OpAMD64FlagGT_UGT {
5805 break
5806 }
5807 v.copyOf(y)
5808 return true
5809 }
5810
5811
5812 for {
5813 y := v_0
5814 if v_2.Op != OpAMD64FlagGT_ULT {
5815 break
5816 }
5817 v.copyOf(y)
5818 return true
5819 }
5820
5821
5822 for {
5823 x := v_1
5824 if v_2.Op != OpAMD64FlagLT_ULT {
5825 break
5826 }
5827 v.copyOf(x)
5828 return true
5829 }
5830
5831
5832 for {
5833 x := v_1
5834 if v_2.Op != OpAMD64FlagLT_UGT {
5835 break
5836 }
5837 v.copyOf(x)
5838 return true
5839 }
5840 return false
5841 }
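// rewriteValueAMD64_OpAMD64CMOVQLS rewrites CMOVQLS (move if unsigned <=):
// (CMOVQLS x y (InvertFlags cond)) -> (CMOVQCC x y cond); a constant
// Flag* condition selects the x or y operand outright.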
5842 func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
5843 v_2 := v.Args[2]
5844 v_1 := v.Args[1]
5845 v_0 := v.Args[0]
5846
5847
5848 for {
5849 x := v_0
5850 y := v_1
5851 if v_2.Op != OpAMD64InvertFlags {
5852 break
5853 }
5854 cond := v_2.Args[0]
5855 v.reset(OpAMD64CMOVQCC)
5856 v.AddArg3(x, y, cond)
5857 return true
5858 }
5859
5860
5861 for {
5862 x := v_1
5863 if v_2.Op != OpAMD64FlagEQ {
5864 break
5865 }
5866 v.copyOf(x)
5867 return true
5868 }
5869
5870
5871 for {
5872 y := v_0
5873 if v_2.Op != OpAMD64FlagGT_UGT {
5874 break
5875 }
5876 v.copyOf(y)
5877 return true
5878 }
5879
5880
5881 for {
5882 x := v_1
5883 if v_2.Op != OpAMD64FlagGT_ULT {
5884 break
5885 }
5886 v.copyOf(x)
5887 return true
5888 }
5889
5890
5891 for {
5892 x := v_1
5893 if v_2.Op != OpAMD64FlagLT_ULT {
5894 break
5895 }
5896 v.copyOf(x)
5897 return true
5898 }
5899
5900
5901 for {
5902 y := v_0
5903 if v_2.Op != OpAMD64FlagLT_UGT {
5904 break
5905 }
5906 v.copyOf(y)
5907 return true
5908 }
5909 return false
5910 }
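// rewriteValueAMD64_OpAMD64CMOVQLT rewrites CMOVQLT (move if signed <):
// (CMOVQLT x y (InvertFlags cond)) -> (CMOVQGT x y cond); a constant
// Flag* condition selects the x or y operand outright.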
5911 func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
5912 v_2 := v.Args[2]
5913 v_1 := v.Args[1]
5914 v_0 := v.Args[0]
5915
5916
5917 for {
5918 x := v_0
5919 y := v_1
5920 if v_2.Op != OpAMD64InvertFlags {
5921 break
5922 }
5923 cond := v_2.Args[0]
5924 v.reset(OpAMD64CMOVQGT)
5925 v.AddArg3(x, y, cond)
5926 return true
5927 }
5928
5929
5930 for {
5931 y := v_0
5932 if v_2.Op != OpAMD64FlagEQ {
5933 break
5934 }
5935 v.copyOf(y)
5936 return true
5937 }
5938
5939
5940 for {
5941 y := v_0
5942 if v_2.Op != OpAMD64FlagGT_UGT {
5943 break
5944 }
5945 v.copyOf(y)
5946 return true
5947 }
5948
5949
5950 for {
5951 y := v_0
5952 if v_2.Op != OpAMD64FlagGT_ULT {
5953 break
5954 }
5955 v.copyOf(y)
5956 return true
5957 }
5958
5959
5960 for {
5961 x := v_1
5962 if v_2.Op != OpAMD64FlagLT_ULT {
5963 break
5964 }
5965 v.copyOf(x)
5966 return true
5967 }
5968
5969
5970 for {
5971 x := v_1
5972 if v_2.Op != OpAMD64FlagLT_UGT {
5973 break
5974 }
5975 v.copyOf(x)
5976 return true
5977 }
5978 return false
5979 }
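// rewriteValueAMD64_OpAMD64CMOVQNE rewrites CMOVQNE (move if not equal):
// InvertFlags is simply dropped, since inequality ignores operand order;
// a constant Flag* condition selects x or y outright.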
5980 func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
5981 v_2 := v.Args[2]
5982 v_1 := v.Args[1]
5983 v_0 := v.Args[0]
5984
5985
5986 for {
5987 x := v_0
5988 y := v_1
5989 if v_2.Op != OpAMD64InvertFlags {
5990 break
5991 }
5992 cond := v_2.Args[0]
5993 v.reset(OpAMD64CMOVQNE)
5994 v.AddArg3(x, y, cond)
5995 return true
5996 }
5997
5998
5999 for {
6000 y := v_0
6001 if v_2.Op != OpAMD64FlagEQ {
6002 break
6003 }
6004 v.copyOf(y)
6005 return true
6006 }
6007
6008
6009 for {
6010 x := v_1
6011 if v_2.Op != OpAMD64FlagGT_UGT {
6012 break
6013 }
6014 v.copyOf(x)
6015 return true
6016 }
6017
6018
6019 for {
6020 x := v_1
6021 if v_2.Op != OpAMD64FlagGT_ULT {
6022 break
6023 }
6024 v.copyOf(x)
6025 return true
6026 }
6027
6028
6029 for {
6030 x := v_1
6031 if v_2.Op != OpAMD64FlagLT_ULT {
6032 break
6033 }
6034 v.copyOf(x)
6035 return true
6036 }
6037
6038
6039 for {
6040 x := v_1
6041 if v_2.Op != OpAMD64FlagLT_UGT {
6042 break
6043 }
6044 v.copyOf(x)
6045 return true
6046 }
6047 return false
6048 }
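// rewriteValueAMD64_OpAMD64CMOVWCC rewrites the 16-bit CMOVWCC
// (unsigned >=): (CMOVWCC x y (InvertFlags cond)) -> (CMOVWLS x y cond);
// a constant Flag* condition selects the x or y operand outright.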
6049 func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
6050 v_2 := v.Args[2]
6051 v_1 := v.Args[1]
6052 v_0 := v.Args[0]
6053
6054
6055 for {
6056 x := v_0
6057 y := v_1
6058 if v_2.Op != OpAMD64InvertFlags {
6059 break
6060 }
6061 cond := v_2.Args[0]
6062 v.reset(OpAMD64CMOVWLS)
6063 v.AddArg3(x, y, cond)
6064 return true
6065 }
6066
6067
6068 for {
6069 x := v_1
6070 if v_2.Op != OpAMD64FlagEQ {
6071 break
6072 }
6073 v.copyOf(x)
6074 return true
6075 }
6076
6077
6078 for {
6079 x := v_1
6080 if v_2.Op != OpAMD64FlagGT_UGT {
6081 break
6082 }
6083 v.copyOf(x)
6084 return true
6085 }
6086
6087
6088 for {
6089 y := v_0
6090 if v_2.Op != OpAMD64FlagGT_ULT {
6091 break
6092 }
6093 v.copyOf(y)
6094 return true
6095 }
6096
6097
6098 for {
6099 y := v_0
6100 if v_2.Op != OpAMD64FlagLT_ULT {
6101 break
6102 }
6103 v.copyOf(y)
6104 return true
6105 }
6106
6107
6108 for {
6109 x := v_1
6110 if v_2.Op != OpAMD64FlagLT_UGT {
6111 break
6112 }
6113 v.copyOf(x)
6114 return true
6115 }
6116 return false
6117 }
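// rewriteValueAMD64_OpAMD64CMOVWCS rewrites the 16-bit CMOVWCS
// (unsigned <): (CMOVWCS x y (InvertFlags cond)) -> (CMOVWHI x y cond);
// a constant Flag* condition selects the x or y operand outright.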
6118 func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
6119 v_2 := v.Args[2]
6120 v_1 := v.Args[1]
6121 v_0 := v.Args[0]
6122
6123
6124 for {
6125 x := v_0
6126 y := v_1
6127 if v_2.Op != OpAMD64InvertFlags {
6128 break
6129 }
6130 cond := v_2.Args[0]
6131 v.reset(OpAMD64CMOVWHI)
6132 v.AddArg3(x, y, cond)
6133 return true
6134 }
6135
6136
6137 for {
6138 y := v_0
6139 if v_2.Op != OpAMD64FlagEQ {
6140 break
6141 }
6142 v.copyOf(y)
6143 return true
6144 }
6145
6146
6147 for {
6148 y := v_0
6149 if v_2.Op != OpAMD64FlagGT_UGT {
6150 break
6151 }
6152 v.copyOf(y)
6153 return true
6154 }
6155
6156
6157 for {
6158 x := v_1
6159 if v_2.Op != OpAMD64FlagGT_ULT {
6160 break
6161 }
6162 v.copyOf(x)
6163 return true
6164 }
6165
6166
6167 for {
6168 x := v_1
6169 if v_2.Op != OpAMD64FlagLT_ULT {
6170 break
6171 }
6172 v.copyOf(x)
6173 return true
6174 }
6175
6176
6177 for {
6178 y := v_0
6179 if v_2.Op != OpAMD64FlagLT_UGT {
6180 break
6181 }
6182 v.copyOf(y)
6183 return true
6184 }
6185 return false
6186 }
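// rewriteValueAMD64_OpAMD64CMOVWEQ rewrites the 16-bit CMOVWEQ (equal):
// InvertFlags is simply dropped, since equality ignores operand order;
// a constant Flag* condition selects x or y outright.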
6187 func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
6188 v_2 := v.Args[2]
6189 v_1 := v.Args[1]
6190 v_0 := v.Args[0]
6191
6192
6193 for {
6194 x := v_0
6195 y := v_1
6196 if v_2.Op != OpAMD64InvertFlags {
6197 break
6198 }
6199 cond := v_2.Args[0]
6200 v.reset(OpAMD64CMOVWEQ)
6201 v.AddArg3(x, y, cond)
6202 return true
6203 }
6204
6205
6206 for {
6207 x := v_1
6208 if v_2.Op != OpAMD64FlagEQ {
6209 break
6210 }
6211 v.copyOf(x)
6212 return true
6213 }
6214
6215
6216 for {
6217 y := v_0
6218 if v_2.Op != OpAMD64FlagGT_UGT {
6219 break
6220 }
6221 v.copyOf(y)
6222 return true
6223 }
6224
6225
6226 for {
6227 y := v_0
6228 if v_2.Op != OpAMD64FlagGT_ULT {
6229 break
6230 }
6231 v.copyOf(y)
6232 return true
6233 }
6234
6235
6236 for {
6237 y := v_0
6238 if v_2.Op != OpAMD64FlagLT_ULT {
6239 break
6240 }
6241 v.copyOf(y)
6242 return true
6243 }
6244
6245
6246 for {
6247 y := v_0
6248 if v_2.Op != OpAMD64FlagLT_UGT {
6249 break
6250 }
6251 v.copyOf(y)
6252 return true
6253 }
6254 return false
6255 }
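// rewriteValueAMD64_OpAMD64CMOVWGE rewrites the 16-bit CMOVWGE
// (signed >=): (CMOVWGE x y (InvertFlags cond)) -> (CMOVWLE x y cond);
// a constant Flag* condition selects the x or y operand outright.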
6256 func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
6257 v_2 := v.Args[2]
6258 v_1 := v.Args[1]
6259 v_0 := v.Args[0]
6260
6261
6262 for {
6263 x := v_0
6264 y := v_1
6265 if v_2.Op != OpAMD64InvertFlags {
6266 break
6267 }
6268 cond := v_2.Args[0]
6269 v.reset(OpAMD64CMOVWLE)
6270 v.AddArg3(x, y, cond)
6271 return true
6272 }
6273
6274
6275 for {
6276 x := v_1
6277 if v_2.Op != OpAMD64FlagEQ {
6278 break
6279 }
6280 v.copyOf(x)
6281 return true
6282 }
6283
6284
6285 for {
6286 x := v_1
6287 if v_2.Op != OpAMD64FlagGT_UGT {
6288 break
6289 }
6290 v.copyOf(x)
6291 return true
6292 }
6293
6294
6295 for {
6296 x := v_1
6297 if v_2.Op != OpAMD64FlagGT_ULT {
6298 break
6299 }
6300 v.copyOf(x)
6301 return true
6302 }
6303
6304
6305 for {
6306 y := v_0
6307 if v_2.Op != OpAMD64FlagLT_ULT {
6308 break
6309 }
6310 v.copyOf(y)
6311 return true
6312 }
6313
6314
6315 for {
6316 y := v_0
6317 if v_2.Op != OpAMD64FlagLT_UGT {
6318 break
6319 }
6320 v.copyOf(y)
6321 return true
6322 }
6323 return false
6324 }
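// rewriteValueAMD64_OpAMD64CMOVWGT rewrites the 16-bit CMOVWGT
// (signed >): (CMOVWGT x y (InvertFlags cond)) -> (CMOVWLT x y cond);
// a constant Flag* condition selects the x or y operand outright.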
6325 func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
6326 v_2 := v.Args[2]
6327 v_1 := v.Args[1]
6328 v_0 := v.Args[0]
6329
6330
6331 for {
6332 x := v_0
6333 y := v_1
6334 if v_2.Op != OpAMD64InvertFlags {
6335 break
6336 }
6337 cond := v_2.Args[0]
6338 v.reset(OpAMD64CMOVWLT)
6339 v.AddArg3(x, y, cond)
6340 return true
6341 }
6342
6343
6344 for {
6345 y := v_0
6346 if v_2.Op != OpAMD64FlagEQ {
6347 break
6348 }
6349 v.copyOf(y)
6350 return true
6351 }
6352
6353
6354 for {
6355 x := v_1
6356 if v_2.Op != OpAMD64FlagGT_UGT {
6357 break
6358 }
6359 v.copyOf(x)
6360 return true
6361 }
6362
6363
6364 for {
6365 x := v_1
6366 if v_2.Op != OpAMD64FlagGT_ULT {
6367 break
6368 }
6369 v.copyOf(x)
6370 return true
6371 }
6372
6373
6374 for {
6375 y := v_0
6376 if v_2.Op != OpAMD64FlagLT_ULT {
6377 break
6378 }
6379 v.copyOf(y)
6380 return true
6381 }
6382
6383
6384 for {
6385 y := v_0
6386 if v_2.Op != OpAMD64FlagLT_UGT {
6387 break
6388 }
6389 v.copyOf(y)
6390 return true
6391 }
6392 return false
6393 }
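// rewriteValueAMD64_OpAMD64CMOVWHI rewrites the 16-bit CMOVWHI
// (unsigned >): (CMOVWHI x y (InvertFlags cond)) -> (CMOVWCS x y cond);
// a constant Flag* condition selects the x or y operand outright.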
6394 func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
6395 v_2 := v.Args[2]
6396 v_1 := v.Args[1]
6397 v_0 := v.Args[0]
6398
6399
6400 for {
6401 x := v_0
6402 y := v_1
6403 if v_2.Op != OpAMD64InvertFlags {
6404 break
6405 }
6406 cond := v_2.Args[0]
6407 v.reset(OpAMD64CMOVWCS)
6408 v.AddArg3(x, y, cond)
6409 return true
6410 }
6411
6412
6413 for {
6414 y := v_0
6415 if v_2.Op != OpAMD64FlagEQ {
6416 break
6417 }
6418 v.copyOf(y)
6419 return true
6420 }
6421
6422
6423 for {
6424 x := v_1
6425 if v_2.Op != OpAMD64FlagGT_UGT {
6426 break
6427 }
6428 v.copyOf(x)
6429 return true
6430 }
6431
6432
6433 for {
6434 y := v_0
6435 if v_2.Op != OpAMD64FlagGT_ULT {
6436 break
6437 }
6438 v.copyOf(y)
6439 return true
6440 }
6441
6442
6443 for {
6444 y := v_0
6445 if v_2.Op != OpAMD64FlagLT_ULT {
6446 break
6447 }
6448 v.copyOf(y)
6449 return true
6450 }
6451
6452
6453 for {
6454 x := v_1
6455 if v_2.Op != OpAMD64FlagLT_UGT {
6456 break
6457 }
6458 v.copyOf(x)
6459 return true
6460 }
6461 return false
6462 }
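// rewriteValueAMD64_OpAMD64CMOVWLE rewrites the 16-bit CMOVWLE
// (signed <=): (CMOVWLE x y (InvertFlags cond)) -> (CMOVWGE x y cond);
// a constant Flag* condition selects the x or y operand outright.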
6463 func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
6464 v_2 := v.Args[2]
6465 v_1 := v.Args[1]
6466 v_0 := v.Args[0]
6467
6468
6469 for {
6470 x := v_0
6471 y := v_1
6472 if v_2.Op != OpAMD64InvertFlags {
6473 break
6474 }
6475 cond := v_2.Args[0]
6476 v.reset(OpAMD64CMOVWGE)
6477 v.AddArg3(x, y, cond)
6478 return true
6479 }
6480
6481
6482 for {
6483 x := v_1
6484 if v_2.Op != OpAMD64FlagEQ {
6485 break
6486 }
6487 v.copyOf(x)
6488 return true
6489 }
6490
6491
6492 for {
6493 y := v_0
6494 if v_2.Op != OpAMD64FlagGT_UGT {
6495 break
6496 }
6497 v.copyOf(y)
6498 return true
6499 }
6500
6501
6502 for {
6503 y := v_0
6504 if v_2.Op != OpAMD64FlagGT_ULT {
6505 break
6506 }
6507 v.copyOf(y)
6508 return true
6509 }
6510
6511
6512 for {
6513 x := v_1
6514 if v_2.Op != OpAMD64FlagLT_ULT {
6515 break
6516 }
6517 v.copyOf(x)
6518 return true
6519 }
6520
6521
6522 for {
6523 x := v_1
6524 if v_2.Op != OpAMD64FlagLT_UGT {
6525 break
6526 }
6527 v.copyOf(x)
6528 return true
6529 }
6530 return false
6531 }
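// rewriteValueAMD64_OpAMD64CMOVWLS rewrites the 16-bit CMOVWLS
// (unsigned <=): (CMOVWLS x y (InvertFlags cond)) -> (CMOVWCC x y cond);
// a constant Flag* condition selects the x or y operand outright.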
6532 func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
6533 v_2 := v.Args[2]
6534 v_1 := v.Args[1]
6535 v_0 := v.Args[0]
6536
6537
6538 for {
6539 x := v_0
6540 y := v_1
6541 if v_2.Op != OpAMD64InvertFlags {
6542 break
6543 }
6544 cond := v_2.Args[0]
6545 v.reset(OpAMD64CMOVWCC)
6546 v.AddArg3(x, y, cond)
6547 return true
6548 }
6549
6550
6551 for {
6552 x := v_1
6553 if v_2.Op != OpAMD64FlagEQ {
6554 break
6555 }
6556 v.copyOf(x)
6557 return true
6558 }
6559
6560
6561 for {
6562 y := v_0
6563 if v_2.Op != OpAMD64FlagGT_UGT {
6564 break
6565 }
6566 v.copyOf(y)
6567 return true
6568 }
6569
6570
6571 for {
6572 x := v_1
6573 if v_2.Op != OpAMD64FlagGT_ULT {
6574 break
6575 }
6576 v.copyOf(x)
6577 return true
6578 }
6579
6580
6581 for {
6582 x := v_1
6583 if v_2.Op != OpAMD64FlagLT_ULT {
6584 break
6585 }
6586 v.copyOf(x)
6587 return true
6588 }
6589
6590
6591 for {
6592 y := v_0
6593 if v_2.Op != OpAMD64FlagLT_UGT {
6594 break
6595 }
6596 v.copyOf(y)
6597 return true
6598 }
6599 return false
6600 }
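// rewriteValueAMD64_OpAMD64CMOVWLT rewrites the 16-bit CMOVWLT
// (signed <): (CMOVWLT x y (InvertFlags cond)) -> (CMOVWGT x y cond);
// a constant Flag* condition selects the x or y operand outright.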
6601 func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
6602 v_2 := v.Args[2]
6603 v_1 := v.Args[1]
6604 v_0 := v.Args[0]
6605
6606
6607 for {
6608 x := v_0
6609 y := v_1
6610 if v_2.Op != OpAMD64InvertFlags {
6611 break
6612 }
6613 cond := v_2.Args[0]
6614 v.reset(OpAMD64CMOVWGT)
6615 v.AddArg3(x, y, cond)
6616 return true
6617 }
6618
6619
6620 for {
6621 y := v_0
6622 if v_2.Op != OpAMD64FlagEQ {
6623 break
6624 }
6625 v.copyOf(y)
6626 return true
6627 }
6628
6629
6630 for {
6631 y := v_0
6632 if v_2.Op != OpAMD64FlagGT_UGT {
6633 break
6634 }
6635 v.copyOf(y)
6636 return true
6637 }
6638
6639
6640 for {
6641 y := v_0
6642 if v_2.Op != OpAMD64FlagGT_ULT {
6643 break
6644 }
6645 v.copyOf(y)
6646 return true
6647 }
6648
6649
6650 for {
6651 x := v_1
6652 if v_2.Op != OpAMD64FlagLT_ULT {
6653 break
6654 }
6655 v.copyOf(x)
6656 return true
6657 }
6658
6659
6660 for {
6661 x := v_1
6662 if v_2.Op != OpAMD64FlagLT_UGT {
6663 break
6664 }
6665 v.copyOf(x)
6666 return true
6667 }
6668 return false
6669 }
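// rewriteValueAMD64_OpAMD64CMOVWNE rewrites the 16-bit CMOVWNE
// (not equal): InvertFlags is simply dropped, since inequality ignores
// operand order; a constant Flag* condition selects x or y outright.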
6670 func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
6671 v_2 := v.Args[2]
6672 v_1 := v.Args[1]
6673 v_0 := v.Args[0]
6674
6675
6676 for {
6677 x := v_0
6678 y := v_1
6679 if v_2.Op != OpAMD64InvertFlags {
6680 break
6681 }
6682 cond := v_2.Args[0]
6683 v.reset(OpAMD64CMOVWNE)
6684 v.AddArg3(x, y, cond)
6685 return true
6686 }
6687
6688
6689 for {
6690 y := v_0
6691 if v_2.Op != OpAMD64FlagEQ {
6692 break
6693 }
6694 v.copyOf(y)
6695 return true
6696 }
6697
6698
6699 for {
6700 x := v_1
6701 if v_2.Op != OpAMD64FlagGT_UGT {
6702 break
6703 }
6704 v.copyOf(x)
6705 return true
6706 }
6707
6708
6709 for {
6710 x := v_1
6711 if v_2.Op != OpAMD64FlagGT_ULT {
6712 break
6713 }
6714 v.copyOf(x)
6715 return true
6716 }
6717
6718
6719 for {
6720 x := v_1
6721 if v_2.Op != OpAMD64FlagLT_ULT {
6722 break
6723 }
6724 v.copyOf(x)
6725 return true
6726 }
6727
6728
6729 for {
6730 x := v_1
6731 if v_2.Op != OpAMD64FlagLT_UGT {
6732 break
6733 }
6734 v.copyOf(x)
6735 return true
6736 }
6737 return false
6738 }
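// rewriteValueAMD64_OpAMD64CMPB rewrites CMPB: a MOVLconst operand folds
// into CMPBconst (behind InvertFlags when the constant is on the left);
// (CMPB x y) with x.ID > y.ID is canonicalized to (InvertFlags (CMPB y x));
// and a mergeable MOVBload operand (canMergeLoad && clobber) folds into
// CMPBload, again behind InvertFlags when the load is the second operand.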
6739 func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
6740 v_1 := v.Args[1]
6741 v_0 := v.Args[0]
6742 b := v.Block
6743
6744
6745 for {
6746 x := v_0
6747 if v_1.Op != OpAMD64MOVLconst {
6748 break
6749 }
6750 c := v_1.AuxInt
6751 v.reset(OpAMD64CMPBconst)
6752 v.AuxInt = int64(int8(c))
6753 v.AddArg(x)
6754 return true
6755 }
6756
6757
6758 for {
6759 if v_0.Op != OpAMD64MOVLconst {
6760 break
6761 }
6762 c := v_0.AuxInt
6763 x := v_1
6764 v.reset(OpAMD64InvertFlags)
6765 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
6766 v0.AuxInt = int64(int8(c))
6767 v0.AddArg(x)
6768 v.AddArg(v0)
6769 return true
6770 }
6771
6772
6773
6774 for {
6775 x := v_0
6776 y := v_1
6777 if !(x.ID > y.ID) {
6778 break
6779 }
6780 v.reset(OpAMD64InvertFlags)
6781 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
6782 v0.AddArg2(y, x)
6783 v.AddArg(v0)
6784 return true
6785 }
6786
6787
6788
6789 for {
6790 l := v_0
6791 if l.Op != OpAMD64MOVBload {
6792 break
6793 }
6794 off := l.AuxInt
6795 sym := l.Aux
6796 mem := l.Args[1]
6797 ptr := l.Args[0]
6798 x := v_1
6799 if !(canMergeLoad(v, l) && clobber(l)) {
6800 break
6801 }
6802 v.reset(OpAMD64CMPBload)
6803 v.AuxInt = off
6804 v.Aux = sym
6805 v.AddArg3(ptr, x, mem)
6806 return true
6807 }
6808
6809
6810
6811 for {
6812 x := v_0
6813 l := v_1
6814 if l.Op != OpAMD64MOVBload {
6815 break
6816 }
6817 off := l.AuxInt
6818 sym := l.Aux
6819 mem := l.Args[1]
6820 ptr := l.Args[0]
6821 if !(canMergeLoad(v, l) && clobber(l)) {
6822 break
6823 }
6824 v.reset(OpAMD64InvertFlags)
6825 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
6826 v0.AuxInt = off
6827 v0.Aux = sym
6828 v0.AddArg3(ptr, x, mem)
6829 v.AddArg(v0)
6830 return true
6831 }
6832 return false
6833 }
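// rewriteValueAMD64_OpAMD64CMPBconst rewrites CMPBconst: a MOVLconst
// argument folds to the exact Flag* result (EQ, LT_ULT, LT_UGT, GT_ULT,
// GT_UGT, compared as int8/uint8); an ANDLconst result known to be below
// the constant gives FlagLT_ULT; comparisons against 0 become
// TESTB/TESTBconst; and a one-use MOVBload folds into CMPBconstload,
// emitted in the load's block.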
6834 func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
6835 v_0 := v.Args[0]
6836 b := v.Block
6837
6838
6839
6840 for {
6841 y := v.AuxInt
6842 if v_0.Op != OpAMD64MOVLconst {
6843 break
6844 }
6845 x := v_0.AuxInt
6846 if !(int8(x) == int8(y)) {
6847 break
6848 }
6849 v.reset(OpAMD64FlagEQ)
6850 return true
6851 }
6852
6853
6854
6855 for {
6856 y := v.AuxInt
6857 if v_0.Op != OpAMD64MOVLconst {
6858 break
6859 }
6860 x := v_0.AuxInt
6861 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
6862 break
6863 }
6864 v.reset(OpAMD64FlagLT_ULT)
6865 return true
6866 }
6867
6868
6869
6870 for {
6871 y := v.AuxInt
6872 if v_0.Op != OpAMD64MOVLconst {
6873 break
6874 }
6875 x := v_0.AuxInt
6876 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
6877 break
6878 }
6879 v.reset(OpAMD64FlagLT_UGT)
6880 return true
6881 }
6882
6883
6884
6885 for {
6886 y := v.AuxInt
6887 if v_0.Op != OpAMD64MOVLconst {
6888 break
6889 }
6890 x := v_0.AuxInt
6891 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
6892 break
6893 }
6894 v.reset(OpAMD64FlagGT_ULT)
6895 return true
6896 }
6897
6898
6899
6900 for {
6901 y := v.AuxInt
6902 if v_0.Op != OpAMD64MOVLconst {
6903 break
6904 }
6905 x := v_0.AuxInt
6906 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
6907 break
6908 }
6909 v.reset(OpAMD64FlagGT_UGT)
6910 return true
6911 }
6912
6913
6914
6915 for {
6916 n := v.AuxInt
6917 if v_0.Op != OpAMD64ANDLconst {
6918 break
6919 }
6920 m := v_0.AuxInt
6921 if !(0 <= int8(m) && int8(m) < int8(n)) {
6922 break
6923 }
6924 v.reset(OpAMD64FlagLT_ULT)
6925 return true
6926 }
6927
6928
6929 for {
6930 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDL {
6931 break
6932 }
6933 y := v_0.Args[1]
6934 x := v_0.Args[0]
6935 v.reset(OpAMD64TESTB)
6936 v.AddArg2(x, y)
6937 return true
6938 }
6939
6940
6941 for {
6942 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDLconst {
6943 break
6944 }
6945 c := v_0.AuxInt
6946 x := v_0.Args[0]
6947 v.reset(OpAMD64TESTBconst)
6948 v.AuxInt = int64(int8(c))
6949 v.AddArg(x)
6950 return true
6951 }
6952
6953
6954 for {
6955 if v.AuxInt != 0 {
6956 break
6957 }
6958 x := v_0
6959 v.reset(OpAMD64TESTB)
6960 v.AddArg2(x, x)
6961 return true
6962 }
6963
6964
6965
6966 for {
6967 c := auxIntToInt8(v.AuxInt)
6968 l := v_0
6969 if l.Op != OpAMD64MOVBload {
6970 break
6971 }
6972 off := auxIntToInt32(l.AuxInt)
6973 sym := auxToSym(l.Aux)
6974 mem := l.Args[1]
6975 ptr := l.Args[0]
6976 if !(l.Uses == 1 && clobber(l)) {
6977 break
6978 }
6979 b = l.Block
6980 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
6981 v.copyOf(v0)
6982 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
6983 v0.Aux = symToAux(sym)
6984 v0.AddArg2(ptr, mem)
6985 return true
6986 }
6987 return false
6988 }
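// rewriteValueAMD64_OpAMD64CMPBconstload folds an ADDQconst or LEAQ base
// into the val/off pair (merging symbols for LEAQ) when the combined
// offset is still representable.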
6989 func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
6990 v_1 := v.Args[1]
6991 v_0 := v.Args[0]
6992
6993
6994
6995 for {
6996 valoff1 := v.AuxInt
6997 sym := v.Aux
6998 if v_0.Op != OpAMD64ADDQconst {
6999 break
7000 }
7001 off2 := v_0.AuxInt
7002 base := v_0.Args[0]
7003 mem := v_1
7004 if !(ValAndOff(valoff1).canAdd(off2)) {
7005 break
7006 }
7007 v.reset(OpAMD64CMPBconstload)
7008 v.AuxInt = ValAndOff(valoff1).add(off2)
7009 v.Aux = sym
7010 v.AddArg2(base, mem)
7011 return true
7012 }
7013
7014
7015
7016 for {
7017 valoff1 := v.AuxInt
7018 sym1 := v.Aux
7019 if v_0.Op != OpAMD64LEAQ {
7020 break
7021 }
7022 off2 := v_0.AuxInt
7023 sym2 := v_0.Aux
7024 base := v_0.Args[0]
7025 mem := v_1
7026 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
7027 break
7028 }
7029 v.reset(OpAMD64CMPBconstload)
7030 v.AuxInt = ValAndOff(valoff1).add(off2)
7031 v.Aux = mergeSym(sym1, sym2)
7032 v.AddArg2(base, mem)
7033 return true
7034 }
7035 return false
7036 }
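// rewriteValueAMD64_OpAMD64CMPBload folds ADDQconst/LEAQ bases into the
// offset and turns a MOVLconst comparand into CMPBconstload when the
// int8-truncated value and offset are representable.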
7037 func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
7038 v_2 := v.Args[2]
7039 v_1 := v.Args[1]
7040 v_0 := v.Args[0]
7041
7042
7043
7044 for {
7045 off1 := v.AuxInt
7046 sym := v.Aux
7047 if v_0.Op != OpAMD64ADDQconst {
7048 break
7049 }
7050 off2 := v_0.AuxInt
7051 base := v_0.Args[0]
7052 val := v_1
7053 mem := v_2
7054 if !(is32Bit(off1 + off2)) {
7055 break
7056 }
7057 v.reset(OpAMD64CMPBload)
7058 v.AuxInt = off1 + off2
7059 v.Aux = sym
7060 v.AddArg3(base, val, mem)
7061 return true
7062 }
7063
7064
7065
7066 for {
7067 off1 := v.AuxInt
7068 sym1 := v.Aux
7069 if v_0.Op != OpAMD64LEAQ {
7070 break
7071 }
7072 off2 := v_0.AuxInt
7073 sym2 := v_0.Aux
7074 base := v_0.Args[0]
7075 val := v_1
7076 mem := v_2
7077 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
7078 break
7079 }
7080 v.reset(OpAMD64CMPBload)
7081 v.AuxInt = off1 + off2
7082 v.Aux = mergeSym(sym1, sym2)
7083 v.AddArg3(base, val, mem)
7084 return true
7085 }
7086
7087
7088
7089 for {
7090 off := v.AuxInt
7091 sym := v.Aux
7092 ptr := v_0
7093 if v_1.Op != OpAMD64MOVLconst {
7094 break
7095 }
7096 c := v_1.AuxInt
7097 mem := v_2
7098 if !(validValAndOff(int64(int8(c)), off)) {
7099 break
7100 }
7101 v.reset(OpAMD64CMPBconstload)
7102 v.AuxInt = makeValAndOff(int64(int8(c)), off)
7103 v.Aux = sym
7104 v.AddArg2(ptr, mem)
7105 return true
7106 }
7107 return false
7108 }
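// rewriteValueAMD64_OpAMD64CMPL is the 32-bit analogue of CMPB: MOVLconst
// operands fold into CMPLconst (behind InvertFlags when on the left),
// argument order is canonicalized by value ID, and a mergeable MOVLload
// folds into CMPLload.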
7109 func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
7110 v_1 := v.Args[1]
7111 v_0 := v.Args[0]
7112 b := v.Block
7113
7114
7115 for {
7116 x := v_0
7117 if v_1.Op != OpAMD64MOVLconst {
7118 break
7119 }
7120 c := v_1.AuxInt
7121 v.reset(OpAMD64CMPLconst)
7122 v.AuxInt = c
7123 v.AddArg(x)
7124 return true
7125 }
7126
7127
7128 for {
7129 if v_0.Op != OpAMD64MOVLconst {
7130 break
7131 }
7132 c := v_0.AuxInt
7133 x := v_1
7134 v.reset(OpAMD64InvertFlags)
7135 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
7136 v0.AuxInt = c
7137 v0.AddArg(x)
7138 v.AddArg(v0)
7139 return true
7140 }
7141
7142
7143
7144 for {
7145 x := v_0
7146 y := v_1
7147 if !(x.ID > y.ID) {
7148 break
7149 }
7150 v.reset(OpAMD64InvertFlags)
7151 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
7152 v0.AddArg2(y, x)
7153 v.AddArg(v0)
7154 return true
7155 }
7156
7157
7158
7159 for {
7160 l := v_0
7161 if l.Op != OpAMD64MOVLload {
7162 break
7163 }
7164 off := l.AuxInt
7165 sym := l.Aux
7166 mem := l.Args[1]
7167 ptr := l.Args[0]
7168 x := v_1
7169 if !(canMergeLoad(v, l) && clobber(l)) {
7170 break
7171 }
7172 v.reset(OpAMD64CMPLload)
7173 v.AuxInt = off
7174 v.Aux = sym
7175 v.AddArg3(ptr, x, mem)
7176 return true
7177 }
7178
7179
7180
7181 for {
7182 x := v_0
7183 l := v_1
7184 if l.Op != OpAMD64MOVLload {
7185 break
7186 }
7187 off := l.AuxInt
7188 sym := l.Aux
7189 mem := l.Args[1]
7190 ptr := l.Args[0]
7191 if !(canMergeLoad(v, l) && clobber(l)) {
7192 break
7193 }
7194 v.reset(OpAMD64InvertFlags)
7195 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
7196 v0.AuxInt = off
7197 v0.Aux = sym
7198 v0.AddArg3(ptr, x, mem)
7199 v.AddArg(v0)
7200 return true
7201 }
7202 return false
7203 }
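// rewriteValueAMD64_OpAMD64CMPLconst mirrors CMPBconst at 32 bits, with
// one extra rule: (CMPLconst (SHRLconst _ [c]) [n]) gives FlagLT_ULT when
// 0 <= n, 0 < c <= 32 and 1<<(32-c) <= n, since the shifted value is then
// always below n, signed and unsigned.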
7204 func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
7205 v_0 := v.Args[0]
7206 b := v.Block
7207
7208
7209
7210 for {
7211 y := v.AuxInt
7212 if v_0.Op != OpAMD64MOVLconst {
7213 break
7214 }
7215 x := v_0.AuxInt
7216 if !(int32(x) == int32(y)) {
7217 break
7218 }
7219 v.reset(OpAMD64FlagEQ)
7220 return true
7221 }
7222
7223
7224
7225 for {
7226 y := v.AuxInt
7227 if v_0.Op != OpAMD64MOVLconst {
7228 break
7229 }
7230 x := v_0.AuxInt
7231 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
7232 break
7233 }
7234 v.reset(OpAMD64FlagLT_ULT)
7235 return true
7236 }
7237
7238
7239
7240 for {
7241 y := v.AuxInt
7242 if v_0.Op != OpAMD64MOVLconst {
7243 break
7244 }
7245 x := v_0.AuxInt
7246 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
7247 break
7248 }
7249 v.reset(OpAMD64FlagLT_UGT)
7250 return true
7251 }
7252
7253
7254
7255 for {
7256 y := v.AuxInt
7257 if v_0.Op != OpAMD64MOVLconst {
7258 break
7259 }
7260 x := v_0.AuxInt
7261 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
7262 break
7263 }
7264 v.reset(OpAMD64FlagGT_ULT)
7265 return true
7266 }
7267
7268
7269
7270 for {
7271 y := v.AuxInt
7272 if v_0.Op != OpAMD64MOVLconst {
7273 break
7274 }
7275 x := v_0.AuxInt
7276 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
7277 break
7278 }
7279 v.reset(OpAMD64FlagGT_UGT)
7280 return true
7281 }
7282
7283
7284
7285 for {
7286 n := v.AuxInt
7287 if v_0.Op != OpAMD64SHRLconst {
7288 break
7289 }
7290 c := v_0.AuxInt
7291 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
7292 break
7293 }
7294 v.reset(OpAMD64FlagLT_ULT)
7295 return true
7296 }
7297
7298
7299
7300 for {
7301 n := v.AuxInt
7302 if v_0.Op != OpAMD64ANDLconst {
7303 break
7304 }
7305 m := v_0.AuxInt
7306 if !(0 <= int32(m) && int32(m) < int32(n)) {
7307 break
7308 }
7309 v.reset(OpAMD64FlagLT_ULT)
7310 return true
7311 }
7312
7313
7314 for {
7315 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDL {
7316 break
7317 }
7318 y := v_0.Args[1]
7319 x := v_0.Args[0]
7320 v.reset(OpAMD64TESTL)
7321 v.AddArg2(x, y)
7322 return true
7323 }
7324
7325
7326 for {
7327 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDLconst {
7328 break
7329 }
7330 c := v_0.AuxInt
7331 x := v_0.Args[0]
7332 v.reset(OpAMD64TESTLconst)
7333 v.AuxInt = c
7334 v.AddArg(x)
7335 return true
7336 }
7337
7338
7339 for {
7340 if v.AuxInt != 0 {
7341 break
7342 }
7343 x := v_0
7344 v.reset(OpAMD64TESTL)
7345 v.AddArg2(x, x)
7346 return true
7347 }
7348
7349
7350
7351 for {
7352 c := auxIntToInt32(v.AuxInt)
7353 l := v_0
7354 if l.Op != OpAMD64MOVLload {
7355 break
7356 }
7357 off := auxIntToInt32(l.AuxInt)
7358 sym := auxToSym(l.Aux)
7359 mem := l.Args[1]
7360 ptr := l.Args[0]
7361 if !(l.Uses == 1 && clobber(l)) {
7362 break
7363 }
7364 b = l.Block
7365 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
7366 v.copyOf(v0)
7367 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
7368 v0.Aux = symToAux(sym)
7369 v0.AddArg2(ptr, mem)
7370 return true
7371 }
7372 return false
7373 }
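// rewriteValueAMD64_OpAMD64CMPLconstload folds ADDQconst/LEAQ bases into
// the val/off pair, merging symbols for LEAQ.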
7374 func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
7375 v_1 := v.Args[1]
7376 v_0 := v.Args[0]
7377
7378
7379
7380 for {
7381 valoff1 := v.AuxInt
7382 sym := v.Aux
7383 if v_0.Op != OpAMD64ADDQconst {
7384 break
7385 }
7386 off2 := v_0.AuxInt
7387 base := v_0.Args[0]
7388 mem := v_1
7389 if !(ValAndOff(valoff1).canAdd(off2)) {
7390 break
7391 }
7392 v.reset(OpAMD64CMPLconstload)
7393 v.AuxInt = ValAndOff(valoff1).add(off2)
7394 v.Aux = sym
7395 v.AddArg2(base, mem)
7396 return true
7397 }
7398
7399
7400
7401 for {
7402 valoff1 := v.AuxInt
7403 sym1 := v.Aux
7404 if v_0.Op != OpAMD64LEAQ {
7405 break
7406 }
7407 off2 := v_0.AuxInt
7408 sym2 := v_0.Aux
7409 base := v_0.Args[0]
7410 mem := v_1
7411 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
7412 break
7413 }
7414 v.reset(OpAMD64CMPLconstload)
7415 v.AuxInt = ValAndOff(valoff1).add(off2)
7416 v.Aux = mergeSym(sym1, sym2)
7417 v.AddArg2(base, mem)
7418 return true
7419 }
7420 return false
7421 }
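// rewriteValueAMD64_OpAMD64CMPLload folds ADDQconst/LEAQ bases into the
// offset and turns a MOVLconst comparand into CMPLconstload when the
// value/offset pair is valid.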
7422 func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
7423 v_2 := v.Args[2]
7424 v_1 := v.Args[1]
7425 v_0 := v.Args[0]
7426
7427
7428
7429 for {
7430 off1 := v.AuxInt
7431 sym := v.Aux
7432 if v_0.Op != OpAMD64ADDQconst {
7433 break
7434 }
7435 off2 := v_0.AuxInt
7436 base := v_0.Args[0]
7437 val := v_1
7438 mem := v_2
7439 if !(is32Bit(off1 + off2)) {
7440 break
7441 }
7442 v.reset(OpAMD64CMPLload)
7443 v.AuxInt = off1 + off2
7444 v.Aux = sym
7445 v.AddArg3(base, val, mem)
7446 return true
7447 }
7448
7449
7450
7451 for {
7452 off1 := v.AuxInt
7453 sym1 := v.Aux
7454 if v_0.Op != OpAMD64LEAQ {
7455 break
7456 }
7457 off2 := v_0.AuxInt
7458 sym2 := v_0.Aux
7459 base := v_0.Args[0]
7460 val := v_1
7461 mem := v_2
7462 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
7463 break
7464 }
7465 v.reset(OpAMD64CMPLload)
7466 v.AuxInt = off1 + off2
7467 v.Aux = mergeSym(sym1, sym2)
7468 v.AddArg3(base, val, mem)
7469 return true
7470 }
7471
7472
7473
7474 for {
7475 off := v.AuxInt
7476 sym := v.Aux
7477 ptr := v_0
7478 if v_1.Op != OpAMD64MOVLconst {
7479 break
7480 }
7481 c := v_1.AuxInt
7482 mem := v_2
7483 if !(validValAndOff(c, off)) {
7484 break
7485 }
7486 v.reset(OpAMD64CMPLconstload)
7487 v.AuxInt = makeValAndOff(c, off)
7488 v.Aux = sym
7489 v.AddArg2(ptr, mem)
7490 return true
7491 }
7492 return false
7493 }
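// rewriteValueAMD64_OpAMD64CMPQ rewrites CMPQ: a MOVQconst that fits in
// 32 bits folds into CMPQconst (behind InvertFlags when on the left);
// argument order is canonicalized by value ID; two MOVQconst operands
// fold to the exact Flag* result; and a mergeable MOVQload folds into
// CMPQload.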
7494 func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
7495 v_1 := v.Args[1]
7496 v_0 := v.Args[0]
7497 b := v.Block
7498
7499
7500
7501 for {
7502 x := v_0
7503 if v_1.Op != OpAMD64MOVQconst {
7504 break
7505 }
7506 c := v_1.AuxInt
7507 if !(is32Bit(c)) {
7508 break
7509 }
7510 v.reset(OpAMD64CMPQconst)
7511 v.AuxInt = c
7512 v.AddArg(x)
7513 return true
7514 }
7515
7516
7517
7518 for {
7519 if v_0.Op != OpAMD64MOVQconst {
7520 break
7521 }
7522 c := v_0.AuxInt
7523 x := v_1
7524 if !(is32Bit(c)) {
7525 break
7526 }
7527 v.reset(OpAMD64InvertFlags)
7528 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
7529 v0.AuxInt = c
7530 v0.AddArg(x)
7531 v.AddArg(v0)
7532 return true
7533 }
7534
7535
7536
7537 for {
7538 x := v_0
7539 y := v_1
7540 if !(x.ID > y.ID) {
7541 break
7542 }
7543 v.reset(OpAMD64InvertFlags)
7544 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
7545 v0.AddArg2(y, x)
7546 v.AddArg(v0)
7547 return true
7548 }
7549
7550
7551
7552 for {
7553 if v_0.Op != OpAMD64MOVQconst {
7554 break
7555 }
7556 x := v_0.AuxInt
7557 if v_1.Op != OpAMD64MOVQconst {
7558 break
7559 }
7560 y := v_1.AuxInt
7561 if !(x == y) {
7562 break
7563 }
7564 v.reset(OpAMD64FlagEQ)
7565 return true
7566 }
7567
7568
7569
7570 for {
7571 if v_0.Op != OpAMD64MOVQconst {
7572 break
7573 }
7574 x := v_0.AuxInt
7575 if v_1.Op != OpAMD64MOVQconst {
7576 break
7577 }
7578 y := v_1.AuxInt
7579 if !(x < y && uint64(x) < uint64(y)) {
7580 break
7581 }
7582 v.reset(OpAMD64FlagLT_ULT)
7583 return true
7584 }
7585
7586
7587
7588 for {
7589 if v_0.Op != OpAMD64MOVQconst {
7590 break
7591 }
7592 x := v_0.AuxInt
7593 if v_1.Op != OpAMD64MOVQconst {
7594 break
7595 }
7596 y := v_1.AuxInt
7597 if !(x < y && uint64(x) > uint64(y)) {
7598 break
7599 }
7600 v.reset(OpAMD64FlagLT_UGT)
7601 return true
7602 }
7603
7604
7605
7606 for {
7607 if v_0.Op != OpAMD64MOVQconst {
7608 break
7609 }
7610 x := v_0.AuxInt
7611 if v_1.Op != OpAMD64MOVQconst {
7612 break
7613 }
7614 y := v_1.AuxInt
7615 if !(x > y && uint64(x) < uint64(y)) {
7616 break
7617 }
7618 v.reset(OpAMD64FlagGT_ULT)
7619 return true
7620 }
7621
7622
7623
7624 for {
7625 if v_0.Op != OpAMD64MOVQconst {
7626 break
7627 }
7628 x := v_0.AuxInt
7629 if v_1.Op != OpAMD64MOVQconst {
7630 break
7631 }
7632 y := v_1.AuxInt
7633 if !(x > y && uint64(x) > uint64(y)) {
7634 break
7635 }
7636 v.reset(OpAMD64FlagGT_UGT)
7637 return true
7638 }
7639
7640
7641
7642 for {
7643 l := v_0
7644 if l.Op != OpAMD64MOVQload {
7645 break
7646 }
7647 off := l.AuxInt
7648 sym := l.Aux
7649 mem := l.Args[1]
7650 ptr := l.Args[0]
7651 x := v_1
7652 if !(canMergeLoad(v, l) && clobber(l)) {
7653 break
7654 }
7655 v.reset(OpAMD64CMPQload)
7656 v.AuxInt = off
7657 v.Aux = sym
7658 v.AddArg3(ptr, x, mem)
7659 return true
7660 }
7661
7662
7663
7664 for {
7665 x := v_0
7666 l := v_1
7667 if l.Op != OpAMD64MOVQload {
7668 break
7669 }
7670 off := l.AuxInt
7671 sym := l.Aux
7672 mem := l.Args[1]
7673 ptr := l.Args[0]
7674 if !(canMergeLoad(v, l) && clobber(l)) {
7675 break
7676 }
7677 v.reset(OpAMD64InvertFlags)
7678 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
7679 v0.AuxInt = off
7680 v0.Aux = sym
7681 v0.AddArg3(ptr, x, mem)
7682 v.AddArg(v0)
7683 return true
7684 }
7685 return false
7686 }
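// rewriteValueAMD64_OpAMD64CMPQconst rewrites CMPQconst: the first two
// rules recognize NEGQ/ADDQconst/ANDQconst shapes that are provably below
// 32 (see the notes at each rule); MOVQconst folds to the exact Flag*
// result; MOVBQZX/MOVWQZX/MOVLQZX zero extensions compared against a
// larger constant give FlagLT_ULT, as do SHRQconst and ANDQconst/ANDLconst
// results known to be below the constant; comparisons against 0 become
// TESTQ/TESTQconst; and a one-use MOVQload folds into CMPQconstload.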
7687 func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
7688 v_0 := v.Args[0]
7689 b := v.Block
7690
7691
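// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
// result: (FlagLT_ULT)
// (x&15)-16 is in [-16,-1], so its negation is in [1,16]: always less
// than 32, signed and unsigned.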
7692 for {
7693 if v.AuxInt != 32 || v_0.Op != OpAMD64NEGQ {
7694 break
7695 }
7696 v_0_0 := v_0.Args[0]
7697 if v_0_0.Op != OpAMD64ADDQconst || v_0_0.AuxInt != -16 {
7698 break
7699 }
7700 v_0_0_0 := v_0_0.Args[0]
7701 if v_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0.AuxInt != 15 {
7702 break
7703 }
7704 v.reset(OpAMD64FlagLT_ULT)
7705 return true
7706 }
7707
7708
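// match: (CMPQconst (NEGQ (ADDQconst [-8] (ANDQconst [7] _))) [32])
// result: (FlagLT_ULT)
// (x&7)-8 is in [-8,-1], so its negation is in [1,8]: always less than
// 32, signed and unsigned.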
7709 for {
7710 if v.AuxInt != 32 || v_0.Op != OpAMD64NEGQ {
7711 break
7712 }
7713 v_0_0 := v_0.Args[0]
7714 if v_0_0.Op != OpAMD64ADDQconst || v_0_0.AuxInt != -8 {
7715 break
7716 }
7717 v_0_0_0 := v_0_0.Args[0]
7718 if v_0_0_0.Op != OpAMD64ANDQconst || v_0_0_0.AuxInt != 7 {
7719 break
7720 }
7721 v.reset(OpAMD64FlagLT_ULT)
7722 return true
7723 }
7724
7725
7726
7727 for {
7728 y := v.AuxInt
7729 if v_0.Op != OpAMD64MOVQconst {
7730 break
7731 }
7732 x := v_0.AuxInt
7733 if !(x == y) {
7734 break
7735 }
7736 v.reset(OpAMD64FlagEQ)
7737 return true
7738 }
7739
7740
7741
7742 for {
7743 y := v.AuxInt
7744 if v_0.Op != OpAMD64MOVQconst {
7745 break
7746 }
7747 x := v_0.AuxInt
7748 if !(x < y && uint64(x) < uint64(y)) {
7749 break
7750 }
7751 v.reset(OpAMD64FlagLT_ULT)
7752 return true
7753 }
7754
7755
7756
7757 for {
7758 y := v.AuxInt
7759 if v_0.Op != OpAMD64MOVQconst {
7760 break
7761 }
7762 x := v_0.AuxInt
7763 if !(x < y && uint64(x) > uint64(y)) {
7764 break
7765 }
7766 v.reset(OpAMD64FlagLT_UGT)
7767 return true
7768 }
7769
7770
7771
7772 for {
7773 y := v.AuxInt
7774 if v_0.Op != OpAMD64MOVQconst {
7775 break
7776 }
7777 x := v_0.AuxInt
7778 if !(x > y && uint64(x) < uint64(y)) {
7779 break
7780 }
7781 v.reset(OpAMD64FlagGT_ULT)
7782 return true
7783 }
7784
7785
7786
7787 for {
7788 y := v.AuxInt
7789 if v_0.Op != OpAMD64MOVQconst {
7790 break
7791 }
7792 x := v_0.AuxInt
7793 if !(x > y && uint64(x) > uint64(y)) {
7794 break
7795 }
7796 v.reset(OpAMD64FlagGT_UGT)
7797 return true
7798 }
7799
7800
7801
7802 for {
7803 c := v.AuxInt
7804 if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
7805 break
7806 }
7807 v.reset(OpAMD64FlagLT_ULT)
7808 return true
7809 }
7810
7811
7812
7813 for {
7814 c := v.AuxInt
7815 if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
7816 break
7817 }
7818 v.reset(OpAMD64FlagLT_ULT)
7819 return true
7820 }
7821
7822
7823
7824 for {
7825 c := v.AuxInt
7826 if v_0.Op != OpAMD64MOVLQZX || !(0xFFFFFFFF < c) {
7827 break
7828 }
7829 v.reset(OpAMD64FlagLT_ULT)
7830 return true
7831 }
7832
7833
7834
7835 for {
7836 n := v.AuxInt
7837 if v_0.Op != OpAMD64SHRQconst {
7838 break
7839 }
7840 c := v_0.AuxInt
7841 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
7842 break
7843 }
7844 v.reset(OpAMD64FlagLT_ULT)
7845 return true
7846 }
7847
7848
7849
7850 for {
7851 n := v.AuxInt
7852 if v_0.Op != OpAMD64ANDQconst {
7853 break
7854 }
7855 m := v_0.AuxInt
7856 if !(0 <= m && m < n) {
7857 break
7858 }
7859 v.reset(OpAMD64FlagLT_ULT)
7860 return true
7861 }
7862
7863
7864
7865 for {
7866 n := v.AuxInt
7867 if v_0.Op != OpAMD64ANDLconst {
7868 break
7869 }
7870 m := v_0.AuxInt
7871 if !(0 <= m && m < n) {
7872 break
7873 }
7874 v.reset(OpAMD64FlagLT_ULT)
7875 return true
7876 }
7877
7878
7879 for {
7880 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDQ {
7881 break
7882 }
7883 y := v_0.Args[1]
7884 x := v_0.Args[0]
7885 v.reset(OpAMD64TESTQ)
7886 v.AddArg2(x, y)
7887 return true
7888 }
7889
7890
7891 for {
7892 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDQconst {
7893 break
7894 }
7895 c := v_0.AuxInt
7896 x := v_0.Args[0]
7897 v.reset(OpAMD64TESTQconst)
7898 v.AuxInt = c
7899 v.AddArg(x)
7900 return true
7901 }
7902
7903
7904 for {
7905 if v.AuxInt != 0 {
7906 break
7907 }
7908 x := v_0
7909 v.reset(OpAMD64TESTQ)
7910 v.AddArg2(x, x)
7911 return true
7912 }
7913
7914
7915
7916 for {
7917 c := auxIntToInt32(v.AuxInt)
7918 l := v_0
7919 if l.Op != OpAMD64MOVQload {
7920 break
7921 }
7922 off := auxIntToInt32(l.AuxInt)
7923 sym := auxToSym(l.Aux)
7924 mem := l.Args[1]
7925 ptr := l.Args[0]
7926 if !(l.Uses == 1 && clobber(l)) {
7927 break
7928 }
7929 b = l.Block
7930 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
7931 v.copyOf(v0)
7932 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off))
7933 v0.Aux = symToAux(sym)
7934 v0.AddArg2(ptr, mem)
7935 return true
7936 }
7937 return false
7938 }
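// rewriteValueAMD64_OpAMD64CMPQconstload folds ADDQconst/LEAQ bases into
// the val/off pair, merging symbols for LEAQ.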
7939 func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
7940 v_1 := v.Args[1]
7941 v_0 := v.Args[0]
7942
7943
7944
7945 for {
7946 valoff1 := v.AuxInt
7947 sym := v.Aux
7948 if v_0.Op != OpAMD64ADDQconst {
7949 break
7950 }
7951 off2 := v_0.AuxInt
7952 base := v_0.Args[0]
7953 mem := v_1
7954 if !(ValAndOff(valoff1).canAdd(off2)) {
7955 break
7956 }
7957 v.reset(OpAMD64CMPQconstload)
7958 v.AuxInt = ValAndOff(valoff1).add(off2)
7959 v.Aux = sym
7960 v.AddArg2(base, mem)
7961 return true
7962 }
7963
7964
7965
7966 for {
7967 valoff1 := v.AuxInt
7968 sym1 := v.Aux
7969 if v_0.Op != OpAMD64LEAQ {
7970 break
7971 }
7972 off2 := v_0.AuxInt
7973 sym2 := v_0.Aux
7974 base := v_0.Args[0]
7975 mem := v_1
7976 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
7977 break
7978 }
7979 v.reset(OpAMD64CMPQconstload)
7980 v.AuxInt = ValAndOff(valoff1).add(off2)
7981 v.Aux = mergeSym(sym1, sym2)
7982 v.AddArg2(base, mem)
7983 return true
7984 }
7985 return false
7986 }
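// rewriteValueAMD64_OpAMD64CMPQload folds ADDQconst/LEAQ bases into the
// offset and turns a MOVQconst comparand into CMPQconstload when the
// value/offset pair is valid.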
7987 func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
7988 v_2 := v.Args[2]
7989 v_1 := v.Args[1]
7990 v_0 := v.Args[0]
7991
7992
7993
7994 for {
7995 off1 := v.AuxInt
7996 sym := v.Aux
7997 if v_0.Op != OpAMD64ADDQconst {
7998 break
7999 }
8000 off2 := v_0.AuxInt
8001 base := v_0.Args[0]
8002 val := v_1
8003 mem := v_2
8004 if !(is32Bit(off1 + off2)) {
8005 break
8006 }
8007 v.reset(OpAMD64CMPQload)
8008 v.AuxInt = off1 + off2
8009 v.Aux = sym
8010 v.AddArg3(base, val, mem)
8011 return true
8012 }
8013
8014
8015
8016 for {
8017 off1 := v.AuxInt
8018 sym1 := v.Aux
8019 if v_0.Op != OpAMD64LEAQ {
8020 break
8021 }
8022 off2 := v_0.AuxInt
8023 sym2 := v_0.Aux
8024 base := v_0.Args[0]
8025 val := v_1
8026 mem := v_2
8027 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
8028 break
8029 }
8030 v.reset(OpAMD64CMPQload)
8031 v.AuxInt = off1 + off2
8032 v.Aux = mergeSym(sym1, sym2)
8033 v.AddArg3(base, val, mem)
8034 return true
8035 }
8036
8037
8038
8039 for {
8040 off := v.AuxInt
8041 sym := v.Aux
8042 ptr := v_0
8043 if v_1.Op != OpAMD64MOVQconst {
8044 break
8045 }
8046 c := v_1.AuxInt
8047 mem := v_2
8048 if !(validValAndOff(c, off)) {
8049 break
8050 }
8051 v.reset(OpAMD64CMPQconstload)
8052 v.AuxInt = makeValAndOff(c, off)
8053 v.Aux = sym
8054 v.AddArg2(ptr, mem)
8055 return true
8056 }
8057 return false
8058 }
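// rewriteValueAMD64_OpAMD64CMPW is the 16-bit analogue of CMPB: MOVLconst
// operands fold into CMPWconst (truncated to int16, behind InvertFlags
// when on the left), argument order is canonicalized by value ID, and a
// mergeable MOVWload folds into CMPWload.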
8059 func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
8060 v_1 := v.Args[1]
8061 v_0 := v.Args[0]
8062 b := v.Block
8063
8064
8065 for {
8066 x := v_0
8067 if v_1.Op != OpAMD64MOVLconst {
8068 break
8069 }
8070 c := v_1.AuxInt
8071 v.reset(OpAMD64CMPWconst)
8072 v.AuxInt = int64(int16(c))
8073 v.AddArg(x)
8074 return true
8075 }
8076
8077
8078 for {
8079 if v_0.Op != OpAMD64MOVLconst {
8080 break
8081 }
8082 c := v_0.AuxInt
8083 x := v_1
8084 v.reset(OpAMD64InvertFlags)
8085 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
8086 v0.AuxInt = int64(int16(c))
8087 v0.AddArg(x)
8088 v.AddArg(v0)
8089 return true
8090 }
8091
8092
8093
8094 for {
8095 x := v_0
8096 y := v_1
8097 if !(x.ID > y.ID) {
8098 break
8099 }
8100 v.reset(OpAMD64InvertFlags)
8101 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
8102 v0.AddArg2(y, x)
8103 v.AddArg(v0)
8104 return true
8105 }
8106
8107
8108
8109 for {
8110 l := v_0
8111 if l.Op != OpAMD64MOVWload {
8112 break
8113 }
8114 off := l.AuxInt
8115 sym := l.Aux
8116 mem := l.Args[1]
8117 ptr := l.Args[0]
8118 x := v_1
8119 if !(canMergeLoad(v, l) && clobber(l)) {
8120 break
8121 }
8122 v.reset(OpAMD64CMPWload)
8123 v.AuxInt = off
8124 v.Aux = sym
8125 v.AddArg3(ptr, x, mem)
8126 return true
8127 }
8128
8129
8130
8131 for {
8132 x := v_0
8133 l := v_1
8134 if l.Op != OpAMD64MOVWload {
8135 break
8136 }
8137 off := l.AuxInt
8138 sym := l.Aux
8139 mem := l.Args[1]
8140 ptr := l.Args[0]
8141 if !(canMergeLoad(v, l) && clobber(l)) {
8142 break
8143 }
8144 v.reset(OpAMD64InvertFlags)
8145 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
8146 v0.AuxInt = off
8147 v0.Aux = sym
8148 v0.AddArg3(ptr, x, mem)
8149 v.AddArg(v0)
8150 return true
8151 }
8152 return false
8153 }
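// rewriteValueAMD64_OpAMD64CMPWconst mirrors CMPBconst at 16 bits:
// constant inputs fold to the exact Flag* result, ANDLconst results below
// the constant give FlagLT_ULT, comparisons against 0 become
// TESTW/TESTWconst, and a one-use MOVWload folds into CMPWconstload.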
8154 func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
8155 v_0 := v.Args[0]
8156 b := v.Block
8157
8158
8159
8160 for {
8161 y := v.AuxInt
8162 if v_0.Op != OpAMD64MOVLconst {
8163 break
8164 }
8165 x := v_0.AuxInt
8166 if !(int16(x) == int16(y)) {
8167 break
8168 }
8169 v.reset(OpAMD64FlagEQ)
8170 return true
8171 }
8172
8173
8174
8175 for {
8176 y := v.AuxInt
8177 if v_0.Op != OpAMD64MOVLconst {
8178 break
8179 }
8180 x := v_0.AuxInt
8181 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
8182 break
8183 }
8184 v.reset(OpAMD64FlagLT_ULT)
8185 return true
8186 }
8187
8188
8189
8190 for {
8191 y := v.AuxInt
8192 if v_0.Op != OpAMD64MOVLconst {
8193 break
8194 }
8195 x := v_0.AuxInt
8196 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
8197 break
8198 }
8199 v.reset(OpAMD64FlagLT_UGT)
8200 return true
8201 }
8202
8203
8204
8205 for {
8206 y := v.AuxInt
8207 if v_0.Op != OpAMD64MOVLconst {
8208 break
8209 }
8210 x := v_0.AuxInt
8211 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
8212 break
8213 }
8214 v.reset(OpAMD64FlagGT_ULT)
8215 return true
8216 }
8217
8218
8219
8220 for {
8221 y := v.AuxInt
8222 if v_0.Op != OpAMD64MOVLconst {
8223 break
8224 }
8225 x := v_0.AuxInt
8226 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
8227 break
8228 }
8229 v.reset(OpAMD64FlagGT_UGT)
8230 return true
8231 }
8232
8233
8234
8235 for {
8236 n := v.AuxInt
8237 if v_0.Op != OpAMD64ANDLconst {
8238 break
8239 }
8240 m := v_0.AuxInt
8241 if !(0 <= int16(m) && int16(m) < int16(n)) {
8242 break
8243 }
8244 v.reset(OpAMD64FlagLT_ULT)
8245 return true
8246 }
8247
8248
8249 for {
8250 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDL {
8251 break
8252 }
8253 y := v_0.Args[1]
8254 x := v_0.Args[0]
8255 v.reset(OpAMD64TESTW)
8256 v.AddArg2(x, y)
8257 return true
8258 }
8259
8260
8261 for {
8262 if v.AuxInt != 0 || v_0.Op != OpAMD64ANDLconst {
8263 break
8264 }
8265 c := v_0.AuxInt
8266 x := v_0.Args[0]
8267 v.reset(OpAMD64TESTWconst)
8268 v.AuxInt = int64(int16(c))
8269 v.AddArg(x)
8270 return true
8271 }
8272
8273
8274 for {
8275 if v.AuxInt != 0 {
8276 break
8277 }
8278 x := v_0
8279 v.reset(OpAMD64TESTW)
8280 v.AddArg2(x, x)
8281 return true
8282 }
8283
8284
8285
8286 for {
8287 c := auxIntToInt16(v.AuxInt)
8288 l := v_0
8289 if l.Op != OpAMD64MOVWload {
8290 break
8291 }
8292 off := auxIntToInt32(l.AuxInt)
8293 sym := auxToSym(l.Aux)
8294 mem := l.Args[1]
8295 ptr := l.Args[0]
8296 if !(l.Uses == 1 && clobber(l)) {
8297 break
8298 }
8299 b = l.Block
8300 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
8301 v.copyOf(v0)
8302 v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off))
8303 v0.Aux = symToAux(sym)
8304 v0.AddArg2(ptr, mem)
8305 return true
8306 }
8307 return false
8308 }
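// rewriteValueAMD64_OpAMD64CMPWconstload folds ADDQconst/LEAQ bases into
// the val/off pair, merging symbols for LEAQ.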
8309 func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
8310 v_1 := v.Args[1]
8311 v_0 := v.Args[0]
8312
8313
8314
8315 for {
8316 valoff1 := v.AuxInt
8317 sym := v.Aux
8318 if v_0.Op != OpAMD64ADDQconst {
8319 break
8320 }
8321 off2 := v_0.AuxInt
8322 base := v_0.Args[0]
8323 mem := v_1
8324 if !(ValAndOff(valoff1).canAdd(off2)) {
8325 break
8326 }
8327 v.reset(OpAMD64CMPWconstload)
8328 v.AuxInt = ValAndOff(valoff1).add(off2)
8329 v.Aux = sym
8330 v.AddArg2(base, mem)
8331 return true
8332 }
8333
8334
8335
8336 for {
8337 valoff1 := v.AuxInt
8338 sym1 := v.Aux
8339 if v_0.Op != OpAMD64LEAQ {
8340 break
8341 }
8342 off2 := v_0.AuxInt
8343 sym2 := v_0.Aux
8344 base := v_0.Args[0]
8345 mem := v_1
8346 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
8347 break
8348 }
8349 v.reset(OpAMD64CMPWconstload)
8350 v.AuxInt = ValAndOff(valoff1).add(off2)
8351 v.Aux = mergeSym(sym1, sym2)
8352 v.AddArg2(base, mem)
8353 return true
8354 }
8355 return false
8356 }
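// rewriteValueAMD64_OpAMD64CMPWload folds ADDQconst/LEAQ bases into the
// offset and turns a MOVLconst comparand into CMPWconstload when the
// int16-truncated value and offset are representable.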
8357 func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
8358 v_2 := v.Args[2]
8359 v_1 := v.Args[1]
8360 v_0 := v.Args[0]
8361
8362
8363
8364 for {
8365 off1 := v.AuxInt
8366 sym := v.Aux
8367 if v_0.Op != OpAMD64ADDQconst {
8368 break
8369 }
8370 off2 := v_0.AuxInt
8371 base := v_0.Args[0]
8372 val := v_1
8373 mem := v_2
8374 if !(is32Bit(off1 + off2)) {
8375 break
8376 }
8377 v.reset(OpAMD64CMPWload)
8378 v.AuxInt = off1 + off2
8379 v.Aux = sym
8380 v.AddArg3(base, val, mem)
8381 return true
8382 }
8383
8384
8385
8386 for {
8387 off1 := v.AuxInt
8388 sym1 := v.Aux
8389 if v_0.Op != OpAMD64LEAQ {
8390 break
8391 }
8392 off2 := v_0.AuxInt
8393 sym2 := v_0.Aux
8394 base := v_0.Args[0]
8395 val := v_1
8396 mem := v_2
8397 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
8398 break
8399 }
8400 v.reset(OpAMD64CMPWload)
8401 v.AuxInt = off1 + off2
8402 v.Aux = mergeSym(sym1, sym2)
8403 v.AddArg3(base, val, mem)
8404 return true
8405 }
8406
8407
8408
8409 for {
8410 off := v.AuxInt
8411 sym := v.Aux
8412 ptr := v_0
8413 if v_1.Op != OpAMD64MOVLconst {
8414 break
8415 }
8416 c := v_1.AuxInt
8417 mem := v_2
8418 if !(validValAndOff(int64(int16(c)), off)) {
8419 break
8420 }
8421 v.reset(OpAMD64CMPWconstload)
8422 v.AuxInt = makeValAndOff(int64(int16(c)), off)
8423 v.Aux = sym
8424 v.AddArg2(ptr, mem)
8425 return true
8426 }
8427 return false
8428 }
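// rewriteValueAMD64_OpAMD64CMPXCHGLlock folds an ADDQconst base pointer
// into the instruction's offset when the sum still fits in 32 bits.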
8429 func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
8430 v_3 := v.Args[3]
8431 v_2 := v.Args[2]
8432 v_1 := v.Args[1]
8433 v_0 := v.Args[0]
8434
8435
8436
8437 for {
8438 off1 := v.AuxInt
8439 sym := v.Aux
8440 if v_0.Op != OpAMD64ADDQconst {
8441 break
8442 }
8443 off2 := v_0.AuxInt
8444 ptr := v_0.Args[0]
8445 old := v_1
8446 new_ := v_2
8447 mem := v_3
8448 if !(is32Bit(off1 + off2)) {
8449 break
8450 }
8451 v.reset(OpAMD64CMPXCHGLlock)
8452 v.AuxInt = off1 + off2
8453 v.Aux = sym
8454 v.AddArg4(ptr, old, new_, mem)
8455 return true
8456 }
8457 return false
8458 }
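// rewriteValueAMD64_OpAMD64CMPXCHGQlock is the 64-bit variant of
// CMPXCHGLlock, with the same ADDQconst offset fold.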
8459 func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
8460 v_3 := v.Args[3]
8461 v_2 := v.Args[2]
8462 v_1 := v.Args[1]
8463 v_0 := v.Args[0]
8464
8465
8466
8467 for {
8468 off1 := v.AuxInt
8469 sym := v.Aux
8470 if v_0.Op != OpAMD64ADDQconst {
8471 break
8472 }
8473 off2 := v_0.AuxInt
8474 ptr := v_0.Args[0]
8475 old := v_1
8476 new_ := v_2
8477 mem := v_3
8478 if !(is32Bit(off1 + off2)) {
8479 break
8480 }
8481 v.reset(OpAMD64CMPXCHGQlock)
8482 v.AuxInt = off1 + off2
8483 v.Aux = sym
8484 v.AddArg4(ptr, old, new_, mem)
8485 return true
8486 }
8487 return false
8488 }
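// rewriteValueAMD64_OpAMD64DIVSD folds a mergeable MOVSDload divisor
// (canMergeLoadClobber && clobber) into DIVSDload. Division is not
// commutative, so only the second operand can become a memory operand.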
8489 func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
8490 v_1 := v.Args[1]
8491 v_0 := v.Args[0]
8492
8493
8494
8495 for {
8496 x := v_0
8497 l := v_1
8498 if l.Op != OpAMD64MOVSDload {
8499 break
8500 }
8501 off := l.AuxInt
8502 sym := l.Aux
8503 mem := l.Args[1]
8504 ptr := l.Args[0]
8505 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8506 break
8507 }
8508 v.reset(OpAMD64DIVSDload)
8509 v.AuxInt = off
8510 v.Aux = sym
8511 v.AddArg3(x, ptr, mem)
8512 return true
8513 }
8514 return false
8515 }
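// rewriteValueAMD64_OpAMD64DIVSDload folds ADDQconst/LEAQ base pointers
// into the load offset, merging symbols for LEAQ.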
8516 func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
8517 v_2 := v.Args[2]
8518 v_1 := v.Args[1]
8519 v_0 := v.Args[0]
8520
8521
8522
8523 for {
8524 off1 := v.AuxInt
8525 sym := v.Aux
8526 val := v_0
8527 if v_1.Op != OpAMD64ADDQconst {
8528 break
8529 }
8530 off2 := v_1.AuxInt
8531 base := v_1.Args[0]
8532 mem := v_2
8533 if !(is32Bit(off1 + off2)) {
8534 break
8535 }
8536 v.reset(OpAMD64DIVSDload)
8537 v.AuxInt = off1 + off2
8538 v.Aux = sym
8539 v.AddArg3(val, base, mem)
8540 return true
8541 }
8542
8543
8544
8545 for {
8546 off1 := v.AuxInt
8547 sym1 := v.Aux
8548 val := v_0
8549 if v_1.Op != OpAMD64LEAQ {
8550 break
8551 }
8552 off2 := v_1.AuxInt
8553 sym2 := v_1.Aux
8554 base := v_1.Args[0]
8555 mem := v_2
8556 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
8557 break
8558 }
8559 v.reset(OpAMD64DIVSDload)
8560 v.AuxInt = off1 + off2
8561 v.Aux = mergeSym(sym1, sym2)
8562 v.AddArg3(val, base, mem)
8563 return true
8564 }
8565 return false
8566 }
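// rewriteValueAMD64_OpAMD64DIVSS is the float32 analogue of DIVSD,
// folding a mergeable MOVSSload divisor into DIVSSload.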
8567 func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
8568 v_1 := v.Args[1]
8569 v_0 := v.Args[0]
8570
8571
8572
8573 for {
8574 x := v_0
8575 l := v_1
8576 if l.Op != OpAMD64MOVSSload {
8577 break
8578 }
8579 off := l.AuxInt
8580 sym := l.Aux
8581 mem := l.Args[1]
8582 ptr := l.Args[0]
8583 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
8584 break
8585 }
8586 v.reset(OpAMD64DIVSSload)
8587 v.AuxInt = off
8588 v.Aux = sym
8589 v.AddArg3(x, ptr, mem)
8590 return true
8591 }
8592 return false
8593 }
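// rewriteValueAMD64_OpAMD64DIVSSload folds ADDQconst/LEAQ base pointers
// into the load offset, merging symbols for LEAQ.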
8594 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8595 v_2 := v.Args[2]
8596 v_1 := v.Args[1]
8597 v_0 := v.Args[0]
8598
8599
8600
8601 for {
8602 off1 := v.AuxInt
8603 sym := v.Aux
8604 val := v_0
8605 if v_1.Op != OpAMD64ADDQconst {
8606 break
8607 }
8608 off2 := v_1.AuxInt
8609 base := v_1.Args[0]
8610 mem := v_2
8611 if !(is32Bit(off1 + off2)) {
8612 break
8613 }
8614 v.reset(OpAMD64DIVSSload)
8615 v.AuxInt = off1 + off2
8616 v.Aux = sym
8617 v.AddArg3(val, base, mem)
8618 return true
8619 }
8620
8621
8622
8623 for {
8624 off1 := v.AuxInt
8625 sym1 := v.Aux
8626 val := v_0
8627 if v_1.Op != OpAMD64LEAQ {
8628 break
8629 }
8630 off2 := v_1.AuxInt
8631 sym2 := v_1.Aux
8632 base := v_1.Args[0]
8633 mem := v_2
8634 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
8635 break
8636 }
8637 v.reset(OpAMD64DIVSSload)
8638 v.AuxInt = off1 + off2
8639 v.Aux = mergeSym(sym1, sym2)
8640 v.AddArg3(val, base, mem)
8641 return true
8642 }
8643 return false
8644 }
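// rewriteValueAMD64_OpAMD64HMULL swaps the operands of the commutative
// HMULL when only y is rematerializeable: the first operand is the one
// constrained to AX, so it is the cheaper slot for a value the register
// allocator can recompute in place.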
8645 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8646 v_1 := v.Args[1]
8647 v_0 := v.Args[0]
8648
8649
8650
8651 for {
8652 x := v_0
8653 y := v_1
8654 if !(!x.rematerializeable() && y.rematerializeable()) {
8655 break
8656 }
8657 v.reset(OpAMD64HMULL)
8658 v.AddArg2(y, x)
8659 return true
8660 }
8661 return false
8662 }
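// rewriteValueAMD64_OpAMD64HMULLU applies the same rematerializeable-
// operand swap as HMULL.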
8663 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8664 v_1 := v.Args[1]
8665 v_0 := v.Args[0]
8666
8667
8668
8669 for {
8670 x := v_0
8671 y := v_1
8672 if !(!x.rematerializeable() && y.rematerializeable()) {
8673 break
8674 }
8675 v.reset(OpAMD64HMULLU)
8676 v.AddArg2(y, x)
8677 return true
8678 }
8679 return false
8680 }
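// rewriteValueAMD64_OpAMD64HMULQ applies the same rematerializeable-
// operand swap as HMULL.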
8681 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8682 v_1 := v.Args[1]
8683 v_0 := v.Args[0]
8684
8685
8686
8687 for {
8688 x := v_0
8689 y := v_1
8690 if !(!x.rematerializeable() && y.rematerializeable()) {
8691 break
8692 }
8693 v.reset(OpAMD64HMULQ)
8694 v.AddArg2(y, x)
8695 return true
8696 }
8697 return false
8698 }
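// rewriteValueAMD64_OpAMD64HMULQU applies the same rematerializeable-
// operand swap as HMULL.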
8699 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8700 v_1 := v.Args[1]
8701 v_0 := v.Args[0]
8702
8703
8704
8705 for {
8706 x := v_0
8707 y := v_1
8708 if !(!x.rematerializeable() && y.rematerializeable()) {
8709 break
8710 }
8711 v.reset(OpAMD64HMULQU)
8712 v.AddArg2(y, x)
8713 return true
8714 }
8715 return false
8716 }
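// rewriteValueAMD64_OpAMD64LEAL folds an ADDLconst operand into the
// displacement (when the sum is still 32-bit) and lowers
// (LEAL [c] {s} (ADDL x y)) to LEAL1 when neither addend is SB.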
8717 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8718 v_0 := v.Args[0]
8719
8720
8721
8722 for {
8723 c := v.AuxInt
8724 s := v.Aux
8725 if v_0.Op != OpAMD64ADDLconst {
8726 break
8727 }
8728 d := v_0.AuxInt
8729 x := v_0.Args[0]
8730 if !(is32Bit(c + d)) {
8731 break
8732 }
8733 v.reset(OpAMD64LEAL)
8734 v.AuxInt = c + d
8735 v.Aux = s
8736 v.AddArg(x)
8737 return true
8738 }
8739
8740
8741
8742 for {
8743 c := v.AuxInt
8744 s := v.Aux
8745 if v_0.Op != OpAMD64ADDL {
8746 break
8747 }
8748 _ = v_0.Args[1]
8749 v_0_0 := v_0.Args[0]
8750 v_0_1 := v_0.Args[1]
8751 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8752 x := v_0_0
8753 y := v_0_1
8754 if !(x.Op != OpSB && y.Op != OpSB) {
8755 continue
8756 }
8757 v.reset(OpAMD64LEAL1)
8758 v.AuxInt = c
8759 v.Aux = s
8760 v.AddArg2(x, y)
8761 return true
8762 }
8763 break
8764 }
8765 return false
8766 }
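// rewriteValueAMD64_OpAMD64LEAL1 (x + 1*y addressing) folds an ADDLconst
// addend into the displacement (commutatively; the remaining operand must
// not be SB) and upgrades a constant-shifted index: shift by 1 -> LEAL2,
// by 2 -> LEAL4, by 3 -> LEAL8.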
8767 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8768 v_1 := v.Args[1]
8769 v_0 := v.Args[0]
8770
8771
8772
8773 for {
8774 c := v.AuxInt
8775 s := v.Aux
8776 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8777 if v_0.Op != OpAMD64ADDLconst {
8778 continue
8779 }
8780 d := v_0.AuxInt
8781 x := v_0.Args[0]
8782 y := v_1
8783 if !(is32Bit(c+d) && x.Op != OpSB) {
8784 continue
8785 }
8786 v.reset(OpAMD64LEAL1)
8787 v.AuxInt = c + d
8788 v.Aux = s
8789 v.AddArg2(x, y)
8790 return true
8791 }
8792 break
8793 }
8794
8795
8796 for {
8797 c := v.AuxInt
8798 s := v.Aux
8799 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8800 x := v_0
8801 if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
8802 continue
8803 }
8804 y := v_1.Args[0]
8805 v.reset(OpAMD64LEAL2)
8806 v.AuxInt = c
8807 v.Aux = s
8808 v.AddArg2(x, y)
8809 return true
8810 }
8811 break
8812 }
8813
8814
8815 for {
8816 c := v.AuxInt
8817 s := v.Aux
8818 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8819 x := v_0
8820 if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 {
8821 continue
8822 }
8823 y := v_1.Args[0]
8824 v.reset(OpAMD64LEAL4)
8825 v.AuxInt = c
8826 v.Aux = s
8827 v.AddArg2(x, y)
8828 return true
8829 }
8830 break
8831 }
8832
8833
8834 for {
8835 c := v.AuxInt
8836 s := v.Aux
8837 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8838 x := v_0
8839 if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 3 {
8840 continue
8841 }
8842 y := v_1.Args[0]
8843 v.reset(OpAMD64LEAL8)
8844 v.AuxInt = c
8845 v.Aux = s
8846 v.AddArg2(x, y)
8847 return true
8848 }
8849 break
8850 }
8851 return false
8852 }
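// rewriteValueAMD64_OpAMD64LEAL2 (x + 2*y): an ADDLconst on the base adds
// d to the displacement; on the index it adds 2*d; a further shift of the
// index upgrades to LEAL4 (shift 1) or LEAL8 (shift 2).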
func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+2*d) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+2*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = c + 2*d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+4*d) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+4*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = c + 4*d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(c+d) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(c+d) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(c+8*d) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := v.AuxInt
		s := v.Aux
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := v_1.AuxInt
		y := v_1.Args[0]
		if !(is32Bit(c+8*d) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = c + 8*d
		v.Aux = s
		v.AddArg2(x, y)
		return true
	}
	return false
}
9042 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
9043 v_0 := v.Args[0]
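	// The loops below fold address arithmetic into a single LEAQ. The
	// generated match/cond/result comments are missing here, so the rules
	// are summarized from the code in rulegen-style notation:
	//   (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) => (LEAQ [c+d] {s} x)
	//   (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB => (LEAQ1 [c] {s} x y)
	//   (LEAQ [off1] {sym1} (LEAQn [off2] {sym2} ...)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	//       => (LEAQn [off1+off2] {mergeSym(sym1, sym2)} ...)   for n in {-, 1, 2, 4, 8}
	// Illustrative effect of the last family (register names arbitrary):
	//	LEAQ 16(AX)(CX*4), BX; LEAQ 8(BX), DX  =>  LEAQ 24(AX)(CX*4), DX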
9044
9045
9046
9047 for {
9048 c := v.AuxInt
9049 s := v.Aux
9050 if v_0.Op != OpAMD64ADDQconst {
9051 break
9052 }
9053 d := v_0.AuxInt
9054 x := v_0.Args[0]
9055 if !(is32Bit(c + d)) {
9056 break
9057 }
9058 v.reset(OpAMD64LEAQ)
9059 v.AuxInt = c + d
9060 v.Aux = s
9061 v.AddArg(x)
9062 return true
9063 }
9064
9065
9066
9067 for {
9068 c := v.AuxInt
9069 s := v.Aux
9070 if v_0.Op != OpAMD64ADDQ {
9071 break
9072 }
9073 _ = v_0.Args[1]
9074 v_0_0 := v_0.Args[0]
9075 v_0_1 := v_0.Args[1]
9076 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
9077 x := v_0_0
9078 y := v_0_1
9079 if !(x.Op != OpSB && y.Op != OpSB) {
9080 continue
9081 }
9082 v.reset(OpAMD64LEAQ1)
9083 v.AuxInt = c
9084 v.Aux = s
9085 v.AddArg2(x, y)
9086 return true
9087 }
9088 break
9089 }
9090
9091
9092
9093 for {
9094 off1 := v.AuxInt
9095 sym1 := v.Aux
9096 if v_0.Op != OpAMD64LEAQ {
9097 break
9098 }
9099 off2 := v_0.AuxInt
9100 sym2 := v_0.Aux
9101 x := v_0.Args[0]
9102 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9103 break
9104 }
9105 v.reset(OpAMD64LEAQ)
9106 v.AuxInt = off1 + off2
9107 v.Aux = mergeSym(sym1, sym2)
9108 v.AddArg(x)
9109 return true
9110 }
9111
9112
9113
9114 for {
9115 off1 := v.AuxInt
9116 sym1 := v.Aux
9117 if v_0.Op != OpAMD64LEAQ1 {
9118 break
9119 }
9120 off2 := v_0.AuxInt
9121 sym2 := v_0.Aux
9122 y := v_0.Args[1]
9123 x := v_0.Args[0]
9124 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9125 break
9126 }
9127 v.reset(OpAMD64LEAQ1)
9128 v.AuxInt = off1 + off2
9129 v.Aux = mergeSym(sym1, sym2)
9130 v.AddArg2(x, y)
9131 return true
9132 }
9133
9134
9135
9136 for {
9137 off1 := v.AuxInt
9138 sym1 := v.Aux
9139 if v_0.Op != OpAMD64LEAQ2 {
9140 break
9141 }
9142 off2 := v_0.AuxInt
9143 sym2 := v_0.Aux
9144 y := v_0.Args[1]
9145 x := v_0.Args[0]
9146 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9147 break
9148 }
9149 v.reset(OpAMD64LEAQ2)
9150 v.AuxInt = off1 + off2
9151 v.Aux = mergeSym(sym1, sym2)
9152 v.AddArg2(x, y)
9153 return true
9154 }
9155
9156
9157
9158 for {
9159 off1 := v.AuxInt
9160 sym1 := v.Aux
9161 if v_0.Op != OpAMD64LEAQ4 {
9162 break
9163 }
9164 off2 := v_0.AuxInt
9165 sym2 := v_0.Aux
9166 y := v_0.Args[1]
9167 x := v_0.Args[0]
9168 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9169 break
9170 }
9171 v.reset(OpAMD64LEAQ4)
9172 v.AuxInt = off1 + off2
9173 v.Aux = mergeSym(sym1, sym2)
9174 v.AddArg2(x, y)
9175 return true
9176 }
9177
9178
9179
9180 for {
9181 off1 := v.AuxInt
9182 sym1 := v.Aux
9183 if v_0.Op != OpAMD64LEAQ8 {
9184 break
9185 }
9186 off2 := v_0.AuxInt
9187 sym2 := v_0.Aux
9188 y := v_0.Args[1]
9189 x := v_0.Args[0]
9190 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9191 break
9192 }
9193 v.reset(OpAMD64LEAQ8)
9194 v.AuxInt = off1 + off2
9195 v.Aux = mergeSym(sym1, sym2)
9196 v.AddArg2(x, y)
9197 return true
9198 }
9199 return false
9200 }
9201 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
9202 v_1 := v.Args[1]
9203 v_0 := v.Args[0]
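	// Rules summarized from the loops below (the generated comments are
	// missing). LEAQ1 computes x + y + c; shifting the index left by k
	// becomes a scale of 2^k:
	//   (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB => (LEAQ1 [c+d] {s} x y)
	//   (LEAQ1 [c] {s} x (SHLQconst [1] y)) => (LEAQ2 [c] {s} x y)
	//   (LEAQ1 [c] {s} x (SHLQconst [2] y)) => (LEAQ4 [c] {s} x y)
	//   (LEAQ1 [c] {s} x (SHLQconst [3] y)) => (LEAQ8 [c] {s} x y)
	//   (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	//       => (LEAQ1 [off1+off2] {mergeSym(sym1, sym2)} x y)
	//   (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	//       => (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	//   (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	//       => (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
	//   (LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
	// The last two merges are sound because x + (y+y) + off = x + 2*y + off
	// and x + (x+y) + off = y + 2*x + off.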
9204
9205
9206
9207 for {
9208 c := v.AuxInt
9209 s := v.Aux
9210 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9211 if v_0.Op != OpAMD64ADDQconst {
9212 continue
9213 }
9214 d := v_0.AuxInt
9215 x := v_0.Args[0]
9216 y := v_1
9217 if !(is32Bit(c+d) && x.Op != OpSB) {
9218 continue
9219 }
9220 v.reset(OpAMD64LEAQ1)
9221 v.AuxInt = c + d
9222 v.Aux = s
9223 v.AddArg2(x, y)
9224 return true
9225 }
9226 break
9227 }
9228
9229
9230 for {
9231 c := v.AuxInt
9232 s := v.Aux
9233 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9234 x := v_0
9235 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
9236 continue
9237 }
9238 y := v_1.Args[0]
9239 v.reset(OpAMD64LEAQ2)
9240 v.AuxInt = c
9241 v.Aux = s
9242 v.AddArg2(x, y)
9243 return true
9244 }
9245 break
9246 }
9247
9248
9249 for {
9250 c := v.AuxInt
9251 s := v.Aux
9252 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9253 x := v_0
9254 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
9255 continue
9256 }
9257 y := v_1.Args[0]
9258 v.reset(OpAMD64LEAQ4)
9259 v.AuxInt = c
9260 v.Aux = s
9261 v.AddArg2(x, y)
9262 return true
9263 }
9264 break
9265 }
9266
9267
9268 for {
9269 c := v.AuxInt
9270 s := v.Aux
9271 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9272 x := v_0
9273 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 3 {
9274 continue
9275 }
9276 y := v_1.Args[0]
9277 v.reset(OpAMD64LEAQ8)
9278 v.AuxInt = c
9279 v.Aux = s
9280 v.AddArg2(x, y)
9281 return true
9282 }
9283 break
9284 }
9285
9286
9287
9288 for {
9289 off1 := v.AuxInt
9290 sym1 := v.Aux
9291 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9292 if v_0.Op != OpAMD64LEAQ {
9293 continue
9294 }
9295 off2 := v_0.AuxInt
9296 sym2 := v_0.Aux
9297 x := v_0.Args[0]
9298 y := v_1
9299 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9300 continue
9301 }
9302 v.reset(OpAMD64LEAQ1)
9303 v.AuxInt = off1 + off2
9304 v.Aux = mergeSym(sym1, sym2)
9305 v.AddArg2(x, y)
9306 return true
9307 }
9308 break
9309 }
9310
9311
9312
9313 for {
9314 off1 := v.AuxInt
9315 sym1 := v.Aux
9316 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9317 x := v_0
9318 if v_1.Op != OpAMD64LEAQ1 {
9319 continue
9320 }
9321 off2 := v_1.AuxInt
9322 sym2 := v_1.Aux
9323 y := v_1.Args[1]
9324 if y != v_1.Args[0] || !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9325 continue
9326 }
9327 v.reset(OpAMD64LEAQ2)
9328 v.AuxInt = off1 + off2
9329 v.Aux = mergeSym(sym1, sym2)
9330 v.AddArg2(x, y)
9331 return true
9332 }
9333 break
9334 }
9335
9336
9337
9338 for {
9339 off1 := v.AuxInt
9340 sym1 := v.Aux
9341 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9342 x := v_0
9343 if v_1.Op != OpAMD64LEAQ1 {
9344 continue
9345 }
9346 off2 := v_1.AuxInt
9347 sym2 := v_1.Aux
9348 _ = v_1.Args[1]
9349 v_1_0 := v_1.Args[0]
9350 v_1_1 := v_1.Args[1]
9351 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
9352 if x != v_1_0 {
9353 continue
9354 }
9355 y := v_1_1
9356 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9357 continue
9358 }
9359 v.reset(OpAMD64LEAQ2)
9360 v.AuxInt = off1 + off2
9361 v.Aux = mergeSym(sym1, sym2)
9362 v.AddArg2(y, x)
9363 return true
9364 }
9365 }
9366 break
9367 }
9368
9369
9370
9371 for {
9372 if v.AuxInt != 0 {
9373 break
9374 }
9375 x := v_0
9376 y := v_1
9377 if !(v.Aux == nil) {
9378 break
9379 }
9380 v.reset(OpAMD64ADDQ)
9381 v.AddArg2(x, y)
9382 return true
9383 }
9384 return false
9385 }
9386 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
9387 v_1 := v.Args[1]
9388 v_0 := v.Args[0]
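	// Rules summarized from the loops below (generated comments missing).
	// LEAQ2 computes x + 2*y + c, so constants folded through the index
	// are scaled by 2:
	//   (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB => (LEAQ2 [c+d] {s} x y)
	//   (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB => (LEAQ2 [c+2*d] {s} x y)
	//   (LEAQ2 [c] {s} x (SHLQconst [1] y)) => (LEAQ4 [c] {s} x y)
	//   (LEAQ2 [c] {s} x (SHLQconst [2] y)) => (LEAQ8 [c] {s} x y)
	//   (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	//       => (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
	//   (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(off1+2*off2) && sym2 == nil
	//       => (LEAQ4 [off1+2*off2] {sym1} x y)
	//   (LEAQ2 [off] {sym} x (MOVQconst [scale])) && is32Bit(off+scale*2) => (LEAQ [off+scale*2] {sym} x)
	//   (LEAQ2 [off] {sym} x (MOVLconst [scale])) && is32Bit(off+scale*2) => (LEAQ [off+scale*2] {sym} x)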
9389
9390
9391
9392 for {
9393 c := v.AuxInt
9394 s := v.Aux
9395 if v_0.Op != OpAMD64ADDQconst {
9396 break
9397 }
9398 d := v_0.AuxInt
9399 x := v_0.Args[0]
9400 y := v_1
9401 if !(is32Bit(c+d) && x.Op != OpSB) {
9402 break
9403 }
9404 v.reset(OpAMD64LEAQ2)
9405 v.AuxInt = c + d
9406 v.Aux = s
9407 v.AddArg2(x, y)
9408 return true
9409 }
9410
9411
9412
9413 for {
9414 c := v.AuxInt
9415 s := v.Aux
9416 x := v_0
9417 if v_1.Op != OpAMD64ADDQconst {
9418 break
9419 }
9420 d := v_1.AuxInt
9421 y := v_1.Args[0]
9422 if !(is32Bit(c+2*d) && y.Op != OpSB) {
9423 break
9424 }
9425 v.reset(OpAMD64LEAQ2)
9426 v.AuxInt = c + 2*d
9427 v.Aux = s
9428 v.AddArg2(x, y)
9429 return true
9430 }
9431
9432
9433 for {
9434 c := v.AuxInt
9435 s := v.Aux
9436 x := v_0
9437 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
9438 break
9439 }
9440 y := v_1.Args[0]
9441 v.reset(OpAMD64LEAQ4)
9442 v.AuxInt = c
9443 v.Aux = s
9444 v.AddArg2(x, y)
9445 return true
9446 }
9447
9448
9449 for {
9450 c := v.AuxInt
9451 s := v.Aux
9452 x := v_0
9453 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 2 {
9454 break
9455 }
9456 y := v_1.Args[0]
9457 v.reset(OpAMD64LEAQ8)
9458 v.AuxInt = c
9459 v.Aux = s
9460 v.AddArg2(x, y)
9461 return true
9462 }
9463
9464
9465
9466 for {
9467 off1 := v.AuxInt
9468 sym1 := v.Aux
9469 if v_0.Op != OpAMD64LEAQ {
9470 break
9471 }
9472 off2 := v_0.AuxInt
9473 sym2 := v_0.Aux
9474 x := v_0.Args[0]
9475 y := v_1
9476 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9477 break
9478 }
9479 v.reset(OpAMD64LEAQ2)
9480 v.AuxInt = off1 + off2
9481 v.Aux = mergeSym(sym1, sym2)
9482 v.AddArg2(x, y)
9483 return true
9484 }
9485
9486
9487
9488 for {
9489 off1 := v.AuxInt
9490 sym1 := v.Aux
9491 x := v_0
9492 if v_1.Op != OpAMD64LEAQ1 {
9493 break
9494 }
9495 off2 := v_1.AuxInt
9496 sym2 := v_1.Aux
9497 y := v_1.Args[1]
9498 if y != v_1.Args[0] || !(is32Bit(off1+2*off2) && sym2 == nil) {
9499 break
9500 }
9501 v.reset(OpAMD64LEAQ4)
9502 v.AuxInt = off1 + 2*off2
9503 v.Aux = sym1
9504 v.AddArg2(x, y)
9505 return true
9506 }
9507
9508
9509
9510 for {
9511 off := v.AuxInt
9512 sym := v.Aux
9513 x := v_0
9514 if v_1.Op != OpAMD64MOVQconst {
9515 break
9516 }
9517 scale := v_1.AuxInt
9518 if !(is32Bit(off + scale*2)) {
9519 break
9520 }
9521 v.reset(OpAMD64LEAQ)
9522 v.AuxInt = off + scale*2
9523 v.Aux = sym
9524 v.AddArg(x)
9525 return true
9526 }
9527
9528
9529
9530 for {
9531 off := v.AuxInt
9532 sym := v.Aux
9533 x := v_0
9534 if v_1.Op != OpAMD64MOVLconst {
9535 break
9536 }
9537 scale := v_1.AuxInt
9538 if !(is32Bit(off + scale*2)) {
9539 break
9540 }
9541 v.reset(OpAMD64LEAQ)
9542 v.AuxInt = off + scale*2
9543 v.Aux = sym
9544 v.AddArg(x)
9545 return true
9546 }
9547 return false
9548 }
9549 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9550 v_1 := v.Args[1]
9551 v_0 := v.Args[0]
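	// Rules summarized from the loops below (generated comments missing);
	// LEAQ4 computes x + 4*y + c:
	//   (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB => (LEAQ4 [c+d] {s} x y)
	//   (LEAQ4 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+4*d) && y.Op != OpSB => (LEAQ4 [c+4*d] {s} x y)
	//   (LEAQ4 [c] {s} x (SHLQconst [1] y)) => (LEAQ8 [c] {s} x y)
	//   (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	//       => (LEAQ4 [off1+off2] {mergeSym(sym1, sym2)} x y)
	//   (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(off1+4*off2) && sym2 == nil
	//       => (LEAQ8 [off1+4*off2] {sym1} x y)
	//   (LEAQ4 [off] {sym} x (MOVQconst [scale])) && is32Bit(off+scale*4) => (LEAQ [off+scale*4] {sym} x)
	//   (LEAQ4 [off] {sym} x (MOVLconst [scale])) && is32Bit(off+scale*4) => (LEAQ [off+scale*4] {sym} x)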
9552
9553
9554
9555 for {
9556 c := v.AuxInt
9557 s := v.Aux
9558 if v_0.Op != OpAMD64ADDQconst {
9559 break
9560 }
9561 d := v_0.AuxInt
9562 x := v_0.Args[0]
9563 y := v_1
9564 if !(is32Bit(c+d) && x.Op != OpSB) {
9565 break
9566 }
9567 v.reset(OpAMD64LEAQ4)
9568 v.AuxInt = c + d
9569 v.Aux = s
9570 v.AddArg2(x, y)
9571 return true
9572 }
9573
9574
9575
9576 for {
9577 c := v.AuxInt
9578 s := v.Aux
9579 x := v_0
9580 if v_1.Op != OpAMD64ADDQconst {
9581 break
9582 }
9583 d := v_1.AuxInt
9584 y := v_1.Args[0]
9585 if !(is32Bit(c+4*d) && y.Op != OpSB) {
9586 break
9587 }
9588 v.reset(OpAMD64LEAQ4)
9589 v.AuxInt = c + 4*d
9590 v.Aux = s
9591 v.AddArg2(x, y)
9592 return true
9593 }
9594
9595
9596 for {
9597 c := v.AuxInt
9598 s := v.Aux
9599 x := v_0
9600 if v_1.Op != OpAMD64SHLQconst || v_1.AuxInt != 1 {
9601 break
9602 }
9603 y := v_1.Args[0]
9604 v.reset(OpAMD64LEAQ8)
9605 v.AuxInt = c
9606 v.Aux = s
9607 v.AddArg2(x, y)
9608 return true
9609 }
9610
9611
9612
9613 for {
9614 off1 := v.AuxInt
9615 sym1 := v.Aux
9616 if v_0.Op != OpAMD64LEAQ {
9617 break
9618 }
9619 off2 := v_0.AuxInt
9620 sym2 := v_0.Aux
9621 x := v_0.Args[0]
9622 y := v_1
9623 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9624 break
9625 }
9626 v.reset(OpAMD64LEAQ4)
9627 v.AuxInt = off1 + off2
9628 v.Aux = mergeSym(sym1, sym2)
9629 v.AddArg2(x, y)
9630 return true
9631 }
9632
9633
9634
9635 for {
9636 off1 := v.AuxInt
9637 sym1 := v.Aux
9638 x := v_0
9639 if v_1.Op != OpAMD64LEAQ1 {
9640 break
9641 }
9642 off2 := v_1.AuxInt
9643 sym2 := v_1.Aux
9644 y := v_1.Args[1]
9645 if y != v_1.Args[0] || !(is32Bit(off1+4*off2) && sym2 == nil) {
9646 break
9647 }
9648 v.reset(OpAMD64LEAQ8)
9649 v.AuxInt = off1 + 4*off2
9650 v.Aux = sym1
9651 v.AddArg2(x, y)
9652 return true
9653 }
9654
9655
9656
9657 for {
9658 off := v.AuxInt
9659 sym := v.Aux
9660 x := v_0
9661 if v_1.Op != OpAMD64MOVQconst {
9662 break
9663 }
9664 scale := v_1.AuxInt
9665 if !(is32Bit(off + scale*4)) {
9666 break
9667 }
9668 v.reset(OpAMD64LEAQ)
9669 v.AuxInt = off + scale*4
9670 v.Aux = sym
9671 v.AddArg(x)
9672 return true
9673 }
9674
9675
9676
9677 for {
9678 off := v.AuxInt
9679 sym := v.Aux
9680 x := v_0
9681 if v_1.Op != OpAMD64MOVLconst {
9682 break
9683 }
9684 scale := v_1.AuxInt
9685 if !(is32Bit(off + scale*4)) {
9686 break
9687 }
9688 v.reset(OpAMD64LEAQ)
9689 v.AuxInt = off + scale*4
9690 v.Aux = sym
9691 v.AddArg(x)
9692 return true
9693 }
9694 return false
9695 }
9696 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9697 v_1 := v.Args[1]
9698 v_0 := v.Args[0]
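	// Rules summarized from the loops below (generated comments missing);
	// LEAQ8 computes x + 8*y + c:
	//   (LEAQ8 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB => (LEAQ8 [c+d] {s} x y)
	//   (LEAQ8 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+8*d) && y.Op != OpSB => (LEAQ8 [c+8*d] {s} x y)
	//   (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
	//       => (LEAQ8 [off1+off2] {mergeSym(sym1, sym2)} x y)
	//   (LEAQ8 [off] {sym} x (MOVQconst [scale])) && is32Bit(off+scale*8) => (LEAQ [off+scale*8] {sym} x)
	//   (LEAQ8 [off] {sym} x (MOVLconst [scale])) && is32Bit(off+scale*8) => (LEAQ [off+scale*8] {sym} x)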
9699
9700
9701
9702 for {
9703 c := v.AuxInt
9704 s := v.Aux
9705 if v_0.Op != OpAMD64ADDQconst {
9706 break
9707 }
9708 d := v_0.AuxInt
9709 x := v_0.Args[0]
9710 y := v_1
9711 if !(is32Bit(c+d) && x.Op != OpSB) {
9712 break
9713 }
9714 v.reset(OpAMD64LEAQ8)
9715 v.AuxInt = c + d
9716 v.Aux = s
9717 v.AddArg2(x, y)
9718 return true
9719 }
9720
9721
9722
9723 for {
9724 c := v.AuxInt
9725 s := v.Aux
9726 x := v_0
9727 if v_1.Op != OpAMD64ADDQconst {
9728 break
9729 }
9730 d := v_1.AuxInt
9731 y := v_1.Args[0]
9732 if !(is32Bit(c+8*d) && y.Op != OpSB) {
9733 break
9734 }
9735 v.reset(OpAMD64LEAQ8)
9736 v.AuxInt = c + 8*d
9737 v.Aux = s
9738 v.AddArg2(x, y)
9739 return true
9740 }
9741
9742
9743
9744 for {
9745 off1 := v.AuxInt
9746 sym1 := v.Aux
9747 if v_0.Op != OpAMD64LEAQ {
9748 break
9749 }
9750 off2 := v_0.AuxInt
9751 sym2 := v_0.Aux
9752 x := v_0.Args[0]
9753 y := v_1
9754 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9755 break
9756 }
9757 v.reset(OpAMD64LEAQ8)
9758 v.AuxInt = off1 + off2
9759 v.Aux = mergeSym(sym1, sym2)
9760 v.AddArg2(x, y)
9761 return true
9762 }
9763
9764
9765
9766 for {
9767 off := v.AuxInt
9768 sym := v.Aux
9769 x := v_0
9770 if v_1.Op != OpAMD64MOVQconst {
9771 break
9772 }
9773 scale := v_1.AuxInt
9774 if !(is32Bit(off + scale*8)) {
9775 break
9776 }
9777 v.reset(OpAMD64LEAQ)
9778 v.AuxInt = off + scale*8
9779 v.Aux = sym
9780 v.AddArg(x)
9781 return true
9782 }
9783
9784
9785
9786 for {
9787 off := v.AuxInt
9788 sym := v.Aux
9789 x := v_0
9790 if v_1.Op != OpAMD64MOVLconst {
9791 break
9792 }
9793 scale := v_1.AuxInt
9794 if !(is32Bit(off + scale*8)) {
9795 break
9796 }
9797 v.reset(OpAMD64LEAQ)
9798 v.AuxInt = off + scale*8
9799 v.Aux = sym
9800 v.AddArg(x)
9801 return true
9802 }
9803 return false
9804 }
9805 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9806 v_0 := v.Args[0]
9807 b := v.Block
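	// Rules summarized from the loops below (generated comments missing):
	//   (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
	//       => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	//     and likewise for MOVWload, MOVLload, and MOVQload: only the low
	//     byte is used, so a wider single-use load can be narrowed to a
	//     sign-extending byte load.
	//   (MOVBQSX (ANDLconst [c] x)) && c&0x80 == 0 => (ANDLconst [c&0x7f] x)
	//     (bit 7 of the result is clear, so sign extension is a no-op)
	//   (MOVBQSX (MOVBQSX x)) => (MOVBQSX x)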
9808
9809
9810
9811 for {
9812 x := v_0
9813 if x.Op != OpAMD64MOVBload {
9814 break
9815 }
9816 off := x.AuxInt
9817 sym := x.Aux
9818 mem := x.Args[1]
9819 ptr := x.Args[0]
9820 if !(x.Uses == 1 && clobber(x)) {
9821 break
9822 }
9823 b = x.Block
9824 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9825 v.copyOf(v0)
9826 v0.AuxInt = off
9827 v0.Aux = sym
9828 v0.AddArg2(ptr, mem)
9829 return true
9830 }
9831
9832
9833
9834 for {
9835 x := v_0
9836 if x.Op != OpAMD64MOVWload {
9837 break
9838 }
9839 off := x.AuxInt
9840 sym := x.Aux
9841 mem := x.Args[1]
9842 ptr := x.Args[0]
9843 if !(x.Uses == 1 && clobber(x)) {
9844 break
9845 }
9846 b = x.Block
9847 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9848 v.copyOf(v0)
9849 v0.AuxInt = off
9850 v0.Aux = sym
9851 v0.AddArg2(ptr, mem)
9852 return true
9853 }
9854
9855
9856
9857 for {
9858 x := v_0
9859 if x.Op != OpAMD64MOVLload {
9860 break
9861 }
9862 off := x.AuxInt
9863 sym := x.Aux
9864 mem := x.Args[1]
9865 ptr := x.Args[0]
9866 if !(x.Uses == 1 && clobber(x)) {
9867 break
9868 }
9869 b = x.Block
9870 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9871 v.copyOf(v0)
9872 v0.AuxInt = off
9873 v0.Aux = sym
9874 v0.AddArg2(ptr, mem)
9875 return true
9876 }
9877
9878
9879
9880 for {
9881 x := v_0
9882 if x.Op != OpAMD64MOVQload {
9883 break
9884 }
9885 off := x.AuxInt
9886 sym := x.Aux
9887 mem := x.Args[1]
9888 ptr := x.Args[0]
9889 if !(x.Uses == 1 && clobber(x)) {
9890 break
9891 }
9892 b = x.Block
9893 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9894 v.copyOf(v0)
9895 v0.AuxInt = off
9896 v0.Aux = sym
9897 v0.AddArg2(ptr, mem)
9898 return true
9899 }
9900
9901
9902
9903 for {
9904 if v_0.Op != OpAMD64ANDLconst {
9905 break
9906 }
9907 c := v_0.AuxInt
9908 x := v_0.Args[0]
9909 if !(c&0x80 == 0) {
9910 break
9911 }
9912 v.reset(OpAMD64ANDLconst)
9913 v.AuxInt = c & 0x7f
9914 v.AddArg(x)
9915 return true
9916 }
9917
9918
9919 for {
9920 if v_0.Op != OpAMD64MOVBQSX {
9921 break
9922 }
9923 x := v_0.Args[0]
9924 v.reset(OpAMD64MOVBQSX)
9925 v.AddArg(x)
9926 return true
9927 }
9928 return false
9929 }
9930 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
9931 v_1 := v.Args[1]
9932 v_0 := v.Args[0]
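	// Rules summarized from the loops below (generated comments missing):
	//   (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	//       && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	//       => (MOVBQSX x)
	//     (store-to-load forwarding: reloading a just-stored byte)
	//   (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	//       && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	//       => (MOVBQSXload [off1+off2] {mergeSym(sym1, sym2)} base mem)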
9933
9934
9935
9936 for {
9937 off := v.AuxInt
9938 sym := v.Aux
9939 ptr := v_0
9940 if v_1.Op != OpAMD64MOVBstore {
9941 break
9942 }
9943 off2 := v_1.AuxInt
9944 sym2 := v_1.Aux
9945 x := v_1.Args[1]
9946 ptr2 := v_1.Args[0]
9947 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9948 break
9949 }
9950 v.reset(OpAMD64MOVBQSX)
9951 v.AddArg(x)
9952 return true
9953 }
9954
9955
9956
9957 for {
9958 off1 := v.AuxInt
9959 sym1 := v.Aux
9960 if v_0.Op != OpAMD64LEAQ {
9961 break
9962 }
9963 off2 := v_0.AuxInt
9964 sym2 := v_0.Aux
9965 base := v_0.Args[0]
9966 mem := v_1
9967 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9968 break
9969 }
9970 v.reset(OpAMD64MOVBQSXload)
9971 v.AuxInt = off1 + off2
9972 v.Aux = mergeSym(sym1, sym2)
9973 v.AddArg2(base, mem)
9974 return true
9975 }
9976 return false
9977 }
9978 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
9979 v_0 := v.Args[0]
9980 b := v.Block
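	// Rules summarized from the loops below (generated comments missing):
	//   (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
	//       => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	//     and likewise for MOVWload, MOVLload, and MOVQload (MOVBload
	//     already zero-extends, so no separate extension op is needed).
	//   (MOVBQZX x) && zeroUpper56Bits(x, 3) => x
	//   (MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c&0xff] x)
	//   (MOVBQZX (MOVBQZX x)) => (MOVBQZX x)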
9981
9982
9983
9984 for {
9985 x := v_0
9986 if x.Op != OpAMD64MOVBload {
9987 break
9988 }
9989 off := x.AuxInt
9990 sym := x.Aux
9991 mem := x.Args[1]
9992 ptr := x.Args[0]
9993 if !(x.Uses == 1 && clobber(x)) {
9994 break
9995 }
9996 b = x.Block
9997 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9998 v.copyOf(v0)
9999 v0.AuxInt = off
10000 v0.Aux = sym
10001 v0.AddArg2(ptr, mem)
10002 return true
10003 }
10004
10005
10006
10007 for {
10008 x := v_0
10009 if x.Op != OpAMD64MOVWload {
10010 break
10011 }
10012 off := x.AuxInt
10013 sym := x.Aux
10014 mem := x.Args[1]
10015 ptr := x.Args[0]
10016 if !(x.Uses == 1 && clobber(x)) {
10017 break
10018 }
10019 b = x.Block
10020 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10021 v.copyOf(v0)
10022 v0.AuxInt = off
10023 v0.Aux = sym
10024 v0.AddArg2(ptr, mem)
10025 return true
10026 }
10027
10028
10029
10030 for {
10031 x := v_0
10032 if x.Op != OpAMD64MOVLload {
10033 break
10034 }
10035 off := x.AuxInt
10036 sym := x.Aux
10037 mem := x.Args[1]
10038 ptr := x.Args[0]
10039 if !(x.Uses == 1 && clobber(x)) {
10040 break
10041 }
10042 b = x.Block
10043 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10044 v.copyOf(v0)
10045 v0.AuxInt = off
10046 v0.Aux = sym
10047 v0.AddArg2(ptr, mem)
10048 return true
10049 }
10050
10051
10052
10053 for {
10054 x := v_0
10055 if x.Op != OpAMD64MOVQload {
10056 break
10057 }
10058 off := x.AuxInt
10059 sym := x.Aux
10060 mem := x.Args[1]
10061 ptr := x.Args[0]
10062 if !(x.Uses == 1 && clobber(x)) {
10063 break
10064 }
10065 b = x.Block
10066 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
10067 v.copyOf(v0)
10068 v0.AuxInt = off
10069 v0.Aux = sym
10070 v0.AddArg2(ptr, mem)
10071 return true
10072 }
10073
10074
10075
10076 for {
10077 x := v_0
10078 if !(zeroUpper56Bits(x, 3)) {
10079 break
10080 }
10081 v.copyOf(x)
10082 return true
10083 }
10084
10085
10086 for {
10087 if v_0.Op != OpAMD64ANDLconst {
10088 break
10089 }
10090 c := v_0.AuxInt
10091 x := v_0.Args[0]
10092 v.reset(OpAMD64ANDLconst)
10093 v.AuxInt = c & 0xff
10094 v.AddArg(x)
10095 return true
10096 }
10097
10098
10099 for {
10100 if v_0.Op != OpAMD64MOVBQZX {
10101 break
10102 }
10103 x := v_0.Args[0]
10104 v.reset(OpAMD64MOVBQZX)
10105 v.AddArg(x)
10106 return true
10107 }
10108 return false
10109 }
10110 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
10111 v_1 := v.Args[1]
10112 v_0 := v.Args[0]
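	// Rules summarized from the loops below (generated comments missing):
	//   (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2)
	//       => (MOVBatomicload [off1+off2] {sym} ptr mem)
	//   (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	//       && is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	//       => (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)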
10113
10114
10115
10116 for {
10117 off1 := v.AuxInt
10118 sym := v.Aux
10119 if v_0.Op != OpAMD64ADDQconst {
10120 break
10121 }
10122 off2 := v_0.AuxInt
10123 ptr := v_0.Args[0]
10124 mem := v_1
10125 if !(is32Bit(off1 + off2)) {
10126 break
10127 }
10128 v.reset(OpAMD64MOVBatomicload)
10129 v.AuxInt = off1 + off2
10130 v.Aux = sym
10131 v.AddArg2(ptr, mem)
10132 return true
10133 }
10134
10135
10136
10137 for {
10138 off1 := v.AuxInt
10139 sym1 := v.Aux
10140 if v_0.Op != OpAMD64LEAQ {
10141 break
10142 }
10143 off2 := v_0.AuxInt
10144 sym2 := v_0.Aux
10145 ptr := v_0.Args[0]
10146 mem := v_1
10147 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
10148 break
10149 }
10150 v.reset(OpAMD64MOVBatomicload)
10151 v.AuxInt = off1 + off2
10152 v.Aux = mergeSym(sym1, sym2)
10153 v.AddArg2(ptr, mem)
10154 return true
10155 }
10156 return false
10157 }
10158 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
10159 v_1 := v.Args[1]
10160 v_0 := v.Args[0]
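	// Rules summarized from the loops below (generated comments missing):
	//   (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	//       && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
	//   (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2)
	//       => (MOVBload [off1+off2] {sym} ptr mem)
	//     and the analogous folds for LEAQ, LEAL, and ADDLconst bases.
	//   (MOVBload [off] {sym} (SB) _) && symIsRO(sym)
	//       => (MOVLconst [int64(read8(sym, off))])
	//     (a byte loaded from read-only data is folded to a constant at
	//     compile time)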
10161
10162
10163
10164 for {
10165 off := v.AuxInt
10166 sym := v.Aux
10167 ptr := v_0
10168 if v_1.Op != OpAMD64MOVBstore {
10169 break
10170 }
10171 off2 := v_1.AuxInt
10172 sym2 := v_1.Aux
10173 x := v_1.Args[1]
10174 ptr2 := v_1.Args[0]
10175 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10176 break
10177 }
10178 v.reset(OpAMD64MOVBQZX)
10179 v.AddArg(x)
10180 return true
10181 }
10182
10183
10184
10185 for {
10186 off1 := v.AuxInt
10187 sym := v.Aux
10188 if v_0.Op != OpAMD64ADDQconst {
10189 break
10190 }
10191 off2 := v_0.AuxInt
10192 ptr := v_0.Args[0]
10193 mem := v_1
10194 if !(is32Bit(off1 + off2)) {
10195 break
10196 }
10197 v.reset(OpAMD64MOVBload)
10198 v.AuxInt = off1 + off2
10199 v.Aux = sym
10200 v.AddArg2(ptr, mem)
10201 return true
10202 }
10203
10204
10205
10206 for {
10207 off1 := v.AuxInt
10208 sym1 := v.Aux
10209 if v_0.Op != OpAMD64LEAQ {
10210 break
10211 }
10212 off2 := v_0.AuxInt
10213 sym2 := v_0.Aux
10214 base := v_0.Args[0]
10215 mem := v_1
10216 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
10217 break
10218 }
10219 v.reset(OpAMD64MOVBload)
10220 v.AuxInt = off1 + off2
10221 v.Aux = mergeSym(sym1, sym2)
10222 v.AddArg2(base, mem)
10223 return true
10224 }
10225
10226
10227
10228 for {
10229 off1 := v.AuxInt
10230 sym1 := v.Aux
10231 if v_0.Op != OpAMD64LEAL {
10232 break
10233 }
10234 off2 := v_0.AuxInt
10235 sym2 := v_0.Aux
10236 base := v_0.Args[0]
10237 mem := v_1
10238 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
10239 break
10240 }
10241 v.reset(OpAMD64MOVBload)
10242 v.AuxInt = off1 + off2
10243 v.Aux = mergeSym(sym1, sym2)
10244 v.AddArg2(base, mem)
10245 return true
10246 }
10247
10248
10249
10250 for {
10251 off1 := v.AuxInt
10252 sym := v.Aux
10253 if v_0.Op != OpAMD64ADDLconst {
10254 break
10255 }
10256 off2 := v_0.AuxInt
10257 ptr := v_0.Args[0]
10258 mem := v_1
10259 if !(is32Bit(off1 + off2)) {
10260 break
10261 }
10262 v.reset(OpAMD64MOVBload)
10263 v.AuxInt = off1 + off2
10264 v.Aux = sym
10265 v.AddArg2(ptr, mem)
10266 return true
10267 }
10268
10269
10270
10271 for {
10272 off := v.AuxInt
10273 sym := v.Aux
10274 if v_0.Op != OpSB || !(symIsRO(sym)) {
10275 break
10276 }
10277 v.reset(OpAMD64MOVLconst)
10278 v.AuxInt = int64(read8(sym, off))
10279 return true
10280 }
10281 return false
10282 }
10283 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
10284 v_2 := v.Args[2]
10285 v_1 := v.Args[1]
10286 v_0 := v.Args[0]
10287 b := v.Block
10288 typ := &b.Func.Config.Types
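	// This function implements several families of rewrites (summarized
	// from the loops below; the generated match/cond/result comments are
	// missing):
	//   - fold a single-use flag materialization into the store:
	//     (MOVBstore [off] {sym} ptr y:(SETcc x) mem) && y.Uses == 1
	//         => (SETccstore [off] {sym} ptr x mem)
	//     for cc in {L, LE, G, GE, EQ, NE, B, BE, A, AE}
	//   - drop a redundant MOVBQSX/MOVBQZX of the stored value
	//   - fold address arithmetic (ADDQconst, LEAQ, LEAL, ADDLconst) into the offset
	//   - rewrite stores of MOVLconst/MOVQconst values as MOVBstoreconst
	//   - combine adjacent single-use byte stores of one value's bytes into
	//     MOVWstore/MOVLstore/MOVQstore, inserting ROLWconst/BSWAPL/BSWAPQ
	//     when the bytes are written in big-endian order
	//   - combine an adjacent byte-load/byte-store copy pair into a
	//     MOVWload/MOVWstore pair
	// For example (illustrative only), the little-endian two-byte split
	//	(MOVBstore [i] {s} p w (MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
	// becomes a single
	//	(MOVWstore [i] {s} p w mem)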
10289
10290
10291
10292 for {
10293 off := v.AuxInt
10294 sym := v.Aux
10295 ptr := v_0
10296 y := v_1
10297 if y.Op != OpAMD64SETL {
10298 break
10299 }
10300 x := y.Args[0]
10301 mem := v_2
10302 if !(y.Uses == 1) {
10303 break
10304 }
10305 v.reset(OpAMD64SETLstore)
10306 v.AuxInt = off
10307 v.Aux = sym
10308 v.AddArg3(ptr, x, mem)
10309 return true
10310 }
10311
10312
10313
10314 for {
10315 off := v.AuxInt
10316 sym := v.Aux
10317 ptr := v_0
10318 y := v_1
10319 if y.Op != OpAMD64SETLE {
10320 break
10321 }
10322 x := y.Args[0]
10323 mem := v_2
10324 if !(y.Uses == 1) {
10325 break
10326 }
10327 v.reset(OpAMD64SETLEstore)
10328 v.AuxInt = off
10329 v.Aux = sym
10330 v.AddArg3(ptr, x, mem)
10331 return true
10332 }
10333
10334
10335
10336 for {
10337 off := v.AuxInt
10338 sym := v.Aux
10339 ptr := v_0
10340 y := v_1
10341 if y.Op != OpAMD64SETG {
10342 break
10343 }
10344 x := y.Args[0]
10345 mem := v_2
10346 if !(y.Uses == 1) {
10347 break
10348 }
10349 v.reset(OpAMD64SETGstore)
10350 v.AuxInt = off
10351 v.Aux = sym
10352 v.AddArg3(ptr, x, mem)
10353 return true
10354 }
10355
10356
10357
10358 for {
10359 off := v.AuxInt
10360 sym := v.Aux
10361 ptr := v_0
10362 y := v_1
10363 if y.Op != OpAMD64SETGE {
10364 break
10365 }
10366 x := y.Args[0]
10367 mem := v_2
10368 if !(y.Uses == 1) {
10369 break
10370 }
10371 v.reset(OpAMD64SETGEstore)
10372 v.AuxInt = off
10373 v.Aux = sym
10374 v.AddArg3(ptr, x, mem)
10375 return true
10376 }
10377
10378
10379
10380 for {
10381 off := v.AuxInt
10382 sym := v.Aux
10383 ptr := v_0
10384 y := v_1
10385 if y.Op != OpAMD64SETEQ {
10386 break
10387 }
10388 x := y.Args[0]
10389 mem := v_2
10390 if !(y.Uses == 1) {
10391 break
10392 }
10393 v.reset(OpAMD64SETEQstore)
10394 v.AuxInt = off
10395 v.Aux = sym
10396 v.AddArg3(ptr, x, mem)
10397 return true
10398 }
10399
10400
10401
10402 for {
10403 off := v.AuxInt
10404 sym := v.Aux
10405 ptr := v_0
10406 y := v_1
10407 if y.Op != OpAMD64SETNE {
10408 break
10409 }
10410 x := y.Args[0]
10411 mem := v_2
10412 if !(y.Uses == 1) {
10413 break
10414 }
10415 v.reset(OpAMD64SETNEstore)
10416 v.AuxInt = off
10417 v.Aux = sym
10418 v.AddArg3(ptr, x, mem)
10419 return true
10420 }
10421
10422
10423
10424 for {
10425 off := v.AuxInt
10426 sym := v.Aux
10427 ptr := v_0
10428 y := v_1
10429 if y.Op != OpAMD64SETB {
10430 break
10431 }
10432 x := y.Args[0]
10433 mem := v_2
10434 if !(y.Uses == 1) {
10435 break
10436 }
10437 v.reset(OpAMD64SETBstore)
10438 v.AuxInt = off
10439 v.Aux = sym
10440 v.AddArg3(ptr, x, mem)
10441 return true
10442 }
10443
10444
10445
10446 for {
10447 off := v.AuxInt
10448 sym := v.Aux
10449 ptr := v_0
10450 y := v_1
10451 if y.Op != OpAMD64SETBE {
10452 break
10453 }
10454 x := y.Args[0]
10455 mem := v_2
10456 if !(y.Uses == 1) {
10457 break
10458 }
10459 v.reset(OpAMD64SETBEstore)
10460 v.AuxInt = off
10461 v.Aux = sym
10462 v.AddArg3(ptr, x, mem)
10463 return true
10464 }
10465
10466
10467
10468 for {
10469 off := v.AuxInt
10470 sym := v.Aux
10471 ptr := v_0
10472 y := v_1
10473 if y.Op != OpAMD64SETA {
10474 break
10475 }
10476 x := y.Args[0]
10477 mem := v_2
10478 if !(y.Uses == 1) {
10479 break
10480 }
10481 v.reset(OpAMD64SETAstore)
10482 v.AuxInt = off
10483 v.Aux = sym
10484 v.AddArg3(ptr, x, mem)
10485 return true
10486 }
10487
10488
10489
10490 for {
10491 off := v.AuxInt
10492 sym := v.Aux
10493 ptr := v_0
10494 y := v_1
10495 if y.Op != OpAMD64SETAE {
10496 break
10497 }
10498 x := y.Args[0]
10499 mem := v_2
10500 if !(y.Uses == 1) {
10501 break
10502 }
10503 v.reset(OpAMD64SETAEstore)
10504 v.AuxInt = off
10505 v.Aux = sym
10506 v.AddArg3(ptr, x, mem)
10507 return true
10508 }
10509
10510
10511 for {
10512 off := v.AuxInt
10513 sym := v.Aux
10514 ptr := v_0
10515 if v_1.Op != OpAMD64MOVBQSX {
10516 break
10517 }
10518 x := v_1.Args[0]
10519 mem := v_2
10520 v.reset(OpAMD64MOVBstore)
10521 v.AuxInt = off
10522 v.Aux = sym
10523 v.AddArg3(ptr, x, mem)
10524 return true
10525 }
10526
10527
10528 for {
10529 off := v.AuxInt
10530 sym := v.Aux
10531 ptr := v_0
10532 if v_1.Op != OpAMD64MOVBQZX {
10533 break
10534 }
10535 x := v_1.Args[0]
10536 mem := v_2
10537 v.reset(OpAMD64MOVBstore)
10538 v.AuxInt = off
10539 v.Aux = sym
10540 v.AddArg3(ptr, x, mem)
10541 return true
10542 }
10543
10544
10545
10546 for {
10547 off1 := v.AuxInt
10548 sym := v.Aux
10549 if v_0.Op != OpAMD64ADDQconst {
10550 break
10551 }
10552 off2 := v_0.AuxInt
10553 ptr := v_0.Args[0]
10554 val := v_1
10555 mem := v_2
10556 if !(is32Bit(off1 + off2)) {
10557 break
10558 }
10559 v.reset(OpAMD64MOVBstore)
10560 v.AuxInt = off1 + off2
10561 v.Aux = sym
10562 v.AddArg3(ptr, val, mem)
10563 return true
10564 }
10565
10566
10567
10568 for {
10569 off := v.AuxInt
10570 sym := v.Aux
10571 ptr := v_0
10572 if v_1.Op != OpAMD64MOVLconst {
10573 break
10574 }
10575 c := v_1.AuxInt
10576 mem := v_2
10577 if !(validOff(off)) {
10578 break
10579 }
10580 v.reset(OpAMD64MOVBstoreconst)
10581 v.AuxInt = makeValAndOff(int64(int8(c)), off)
10582 v.Aux = sym
10583 v.AddArg2(ptr, mem)
10584 return true
10585 }
10586
10587
10588
10589 for {
10590 off := v.AuxInt
10591 sym := v.Aux
10592 ptr := v_0
10593 if v_1.Op != OpAMD64MOVQconst {
10594 break
10595 }
10596 c := v_1.AuxInt
10597 mem := v_2
10598 if !(validOff(off)) {
10599 break
10600 }
10601 v.reset(OpAMD64MOVBstoreconst)
10602 v.AuxInt = makeValAndOff(int64(int8(c)), off)
10603 v.Aux = sym
10604 v.AddArg2(ptr, mem)
10605 return true
10606 }
10607
10608
10609
10610 for {
10611 off1 := v.AuxInt
10612 sym1 := v.Aux
10613 if v_0.Op != OpAMD64LEAQ {
10614 break
10615 }
10616 off2 := v_0.AuxInt
10617 sym2 := v_0.Aux
10618 base := v_0.Args[0]
10619 val := v_1
10620 mem := v_2
10621 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
10622 break
10623 }
10624 v.reset(OpAMD64MOVBstore)
10625 v.AuxInt = off1 + off2
10626 v.Aux = mergeSym(sym1, sym2)
10627 v.AddArg3(base, val, mem)
10628 return true
10629 }
10630
10631
10632
10633 for {
10634 i := v.AuxInt
10635 s := v.Aux
10636 p := v_0
10637 w := v_1
10638 x0 := v_2
10639 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s {
10640 break
10641 }
10642 mem := x0.Args[2]
10643 if p != x0.Args[0] {
10644 break
10645 }
10646 x0_1 := x0.Args[1]
10647 if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
10648 break
10649 }
10650 v.reset(OpAMD64MOVWstore)
10651 v.AuxInt = i - 1
10652 v.Aux = s
10653 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10654 v0.AuxInt = 8
10655 v0.AddArg(w)
10656 v.AddArg3(p, v0, mem)
10657 return true
10658 }
10659
10660
10661
10662 for {
10663 i := v.AuxInt
10664 s := v.Aux
10665 p1 := v_0
10666 w := v_1
10667 x0 := v_2
10668 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s {
10669 break
10670 }
10671 mem := x0.Args[2]
10672 p0 := x0.Args[0]
10673 x0_1 := x0.Args[1]
10674 if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
10675 break
10676 }
10677 v.reset(OpAMD64MOVWstore)
10678 v.AuxInt = i
10679 v.Aux = s
10680 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10681 v0.AuxInt = 8
10682 v0.AddArg(w)
10683 v.AddArg3(p0, v0, mem)
10684 return true
10685 }
10686
10687
10688
10689 for {
10690 i := v.AuxInt
10691 s := v.Aux
10692 p := v_0
10693 w := v_1
10694 x2 := v_2
10695 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-1 || x2.Aux != s {
10696 break
10697 }
10698 _ = x2.Args[2]
10699 if p != x2.Args[0] {
10700 break
10701 }
10702 x2_1 := x2.Args[1]
10703 if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] {
10704 break
10705 }
10706 x1 := x2.Args[2]
10707 if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s {
10708 break
10709 }
10710 _ = x1.Args[2]
10711 if p != x1.Args[0] {
10712 break
10713 }
10714 x1_1 := x1.Args[1]
10715 if x1_1.Op != OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
10716 break
10717 }
10718 x0 := x1.Args[2]
10719 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-3 || x0.Aux != s {
10720 break
10721 }
10722 mem := x0.Args[2]
10723 if p != x0.Args[0] {
10724 break
10725 }
10726 x0_1 := x0.Args[1]
10727 if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
10728 break
10729 }
10730 v.reset(OpAMD64MOVLstore)
10731 v.AuxInt = i - 3
10732 v.Aux = s
10733 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10734 v0.AddArg(w)
10735 v.AddArg3(p, v0, mem)
10736 return true
10737 }
10738
10739
10740
10741 for {
10742 i := v.AuxInt
10743 s := v.Aux
10744 p3 := v_0
10745 w := v_1
10746 x2 := v_2
10747 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i || x2.Aux != s {
10748 break
10749 }
10750 _ = x2.Args[2]
10751 p2 := x2.Args[0]
10752 x2_1 := x2.Args[1]
10753 if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] {
10754 break
10755 }
10756 x1 := x2.Args[2]
10757 if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i || x1.Aux != s {
10758 break
10759 }
10760 _ = x1.Args[2]
10761 p1 := x1.Args[0]
10762 x1_1 := x1.Args[1]
10763 if x1_1.Op != OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] {
10764 break
10765 }
10766 x0 := x1.Args[2]
10767 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s {
10768 break
10769 }
10770 mem := x0.Args[2]
10771 p0 := x0.Args[0]
10772 x0_1 := x0.Args[1]
10773 if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
10774 break
10775 }
10776 v.reset(OpAMD64MOVLstore)
10777 v.AuxInt = i
10778 v.Aux = s
10779 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10780 v0.AddArg(w)
10781 v.AddArg3(p0, v0, mem)
10782 return true
10783 }
10784
10785
10786
10787 for {
10788 i := v.AuxInt
10789 s := v.Aux
10790 p := v_0
10791 w := v_1
10792 x6 := v_2
10793 if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i-1 || x6.Aux != s {
10794 break
10795 }
10796 _ = x6.Args[2]
10797 if p != x6.Args[0] {
10798 break
10799 }
10800 x6_1 := x6.Args[1]
10801 if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] {
10802 break
10803 }
10804 x5 := x6.Args[2]
10805 if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i-2 || x5.Aux != s {
10806 break
10807 }
10808 _ = x5.Args[2]
10809 if p != x5.Args[0] {
10810 break
10811 }
10812 x5_1 := x5.Args[1]
10813 if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] {
10814 break
10815 }
10816 x4 := x5.Args[2]
10817 if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i-3 || x4.Aux != s {
10818 break
10819 }
10820 _ = x4.Args[2]
10821 if p != x4.Args[0] {
10822 break
10823 }
10824 x4_1 := x4.Args[1]
10825 if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] {
10826 break
10827 }
10828 x3 := x4.Args[2]
10829 if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s {
10830 break
10831 }
10832 _ = x3.Args[2]
10833 if p != x3.Args[0] {
10834 break
10835 }
10836 x3_1 := x3.Args[1]
10837 if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
10838 break
10839 }
10840 x2 := x3.Args[2]
10841 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-5 || x2.Aux != s {
10842 break
10843 }
10844 _ = x2.Args[2]
10845 if p != x2.Args[0] {
10846 break
10847 }
10848 x2_1 := x2.Args[1]
10849 if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] {
10850 break
10851 }
10852 x1 := x2.Args[2]
10853 if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-6 || x1.Aux != s {
10854 break
10855 }
10856 _ = x1.Args[2]
10857 if p != x1.Args[0] {
10858 break
10859 }
10860 x1_1 := x1.Args[1]
10861 if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] {
10862 break
10863 }
10864 x0 := x1.Args[2]
10865 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-7 || x0.Aux != s {
10866 break
10867 }
10868 mem := x0.Args[2]
10869 if p != x0.Args[0] {
10870 break
10871 }
10872 x0_1 := x0.Args[1]
10873 if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10874 break
10875 }
10876 v.reset(OpAMD64MOVQstore)
10877 v.AuxInt = i - 7
10878 v.Aux = s
10879 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10880 v0.AddArg(w)
10881 v.AddArg3(p, v0, mem)
10882 return true
10883 }
10884
10885
10886
10887 for {
10888 i := v.AuxInt
10889 s := v.Aux
10890 p7 := v_0
10891 w := v_1
10892 x6 := v_2
10893 if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i || x6.Aux != s {
10894 break
10895 }
10896 _ = x6.Args[2]
10897 p6 := x6.Args[0]
10898 x6_1 := x6.Args[1]
10899 if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] {
10900 break
10901 }
10902 x5 := x6.Args[2]
10903 if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i || x5.Aux != s {
10904 break
10905 }
10906 _ = x5.Args[2]
10907 p5 := x5.Args[0]
10908 x5_1 := x5.Args[1]
10909 if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] {
10910 break
10911 }
10912 x4 := x5.Args[2]
10913 if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i || x4.Aux != s {
10914 break
10915 }
10916 _ = x4.Args[2]
10917 p4 := x4.Args[0]
10918 x4_1 := x4.Args[1]
10919 if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] {
10920 break
10921 }
10922 x3 := x4.Args[2]
10923 if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i || x3.Aux != s {
10924 break
10925 }
10926 _ = x3.Args[2]
10927 p3 := x3.Args[0]
10928 x3_1 := x3.Args[1]
10929 if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] {
10930 break
10931 }
10932 x2 := x3.Args[2]
10933 if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i || x2.Aux != s {
10934 break
10935 }
10936 _ = x2.Args[2]
10937 p2 := x2.Args[0]
10938 x2_1 := x2.Args[1]
10939 if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] {
10940 break
10941 }
10942 x1 := x2.Args[2]
10943 if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i || x1.Aux != s {
10944 break
10945 }
10946 _ = x1.Args[2]
10947 p1 := x1.Args[0]
10948 x1_1 := x1.Args[1]
10949 if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] {
10950 break
10951 }
10952 x0 := x1.Args[2]
10953 if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s {
10954 break
10955 }
10956 mem := x0.Args[2]
10957 p0 := x0.Args[0]
10958 x0_1 := x0.Args[1]
10959 if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10960 break
10961 }
10962 v.reset(OpAMD64MOVQstore)
10963 v.AuxInt = i
10964 v.Aux = s
10965 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10966 v0.AddArg(w)
10967 v.AddArg3(p0, v0, mem)
10968 return true
10969 }
10970
10971
10972
10973 for {
10974 i := v.AuxInt
10975 s := v.Aux
10976 p := v_0
10977 if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 {
10978 break
10979 }
10980 w := v_1.Args[0]
10981 x := v_2
10982 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
10983 break
10984 }
10985 mem := x.Args[2]
10986 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10987 break
10988 }
10989 v.reset(OpAMD64MOVWstore)
10990 v.AuxInt = i - 1
10991 v.Aux = s
10992 v.AddArg3(p, w, mem)
10993 return true
10994 }
10995
10996
10997
10998 for {
10999 i := v.AuxInt
11000 s := v.Aux
11001 p := v_0
11002 if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 {
11003 break
11004 }
11005 w := v_1.Args[0]
11006 x := v_2
11007 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
11008 break
11009 }
11010 mem := x.Args[2]
11011 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11012 break
11013 }
11014 v.reset(OpAMD64MOVWstore)
11015 v.AuxInt = i - 1
11016 v.Aux = s
11017 v.AddArg3(p, w, mem)
11018 return true
11019 }
11020
11021
11022
11023 for {
11024 i := v.AuxInt
11025 s := v.Aux
11026 p := v_0
11027 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 {
11028 break
11029 }
11030 w := v_1.Args[0]
11031 x := v_2
11032 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
11033 break
11034 }
11035 mem := x.Args[2]
11036 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11037 break
11038 }
11039 v.reset(OpAMD64MOVWstore)
11040 v.AuxInt = i - 1
11041 v.Aux = s
11042 v.AddArg3(p, w, mem)
11043 return true
11044 }
11045
11046
11047
11048 for {
11049 i := v.AuxInt
11050 s := v.Aux
11051 p := v_0
11052 w := v_1
11053 x := v_2
11054 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s {
11055 break
11056 }
11057 mem := x.Args[2]
11058 if p != x.Args[0] {
11059 break
11060 }
11061 x_1 := x.Args[1]
11062 if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11063 break
11064 }
11065 v.reset(OpAMD64MOVWstore)
11066 v.AuxInt = i
11067 v.Aux = s
11068 v.AddArg3(p, w, mem)
11069 return true
11070 }
11071
11072
11073
11074 for {
11075 i := v.AuxInt
11076 s := v.Aux
11077 p := v_0
11078 w := v_1
11079 x := v_2
11080 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s {
11081 break
11082 }
11083 mem := x.Args[2]
11084 if p != x.Args[0] {
11085 break
11086 }
11087 x_1 := x.Args[1]
11088 if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11089 break
11090 }
11091 v.reset(OpAMD64MOVWstore)
11092 v.AuxInt = i
11093 v.Aux = s
11094 v.AddArg3(p, w, mem)
11095 return true
11096 }
11097
11098
11099
11100 for {
11101 i := v.AuxInt
11102 s := v.Aux
11103 p := v_0
11104 w := v_1
11105 x := v_2
11106 if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s {
11107 break
11108 }
11109 mem := x.Args[2]
11110 if p != x.Args[0] {
11111 break
11112 }
11113 x_1 := x.Args[1]
11114 if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
11115 break
11116 }
11117 v.reset(OpAMD64MOVWstore)
11118 v.AuxInt = i
11119 v.Aux = s
11120 v.AddArg3(p, w, mem)
11121 return true
11122 }
11123
11124
11125
11126 for {
11127 i := v.AuxInt
11128 s := v.Aux
11129 p := v_0
11130 if v_1.Op != OpAMD64SHRLconst {
11131 break
11132 }
11133 j := v_1.AuxInt
11134 w := v_1.Args[0]
11135 x := v_2
11136 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
11137 break
11138 }
11139 mem := x.Args[2]
11140 if p != x.Args[0] {
11141 break
11142 }
11143 w0 := x.Args[1]
11144 if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11145 break
11146 }
11147 v.reset(OpAMD64MOVWstore)
11148 v.AuxInt = i - 1
11149 v.Aux = s
11150 v.AddArg3(p, w0, mem)
11151 return true
11152 }
11153
11154
11155
11156 for {
11157 i := v.AuxInt
11158 s := v.Aux
11159 p := v_0
11160 if v_1.Op != OpAMD64SHRQconst {
11161 break
11162 }
11163 j := v_1.AuxInt
11164 w := v_1.Args[0]
11165 x := v_2
11166 if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s {
11167 break
11168 }
11169 mem := x.Args[2]
11170 if p != x.Args[0] {
11171 break
11172 }
11173 w0 := x.Args[1]
11174 if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11175 break
11176 }
11177 v.reset(OpAMD64MOVWstore)
11178 v.AuxInt = i - 1
11179 v.Aux = s
11180 v.AddArg3(p, w0, mem)
11181 return true
11182 }
11183
11184
11185
11186 for {
11187 i := v.AuxInt
11188 s := v.Aux
11189 p1 := v_0
11190 if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 {
11191 break
11192 }
11193 w := v_1.Args[0]
11194 x := v_2
11195 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11196 break
11197 }
11198 mem := x.Args[2]
11199 p0 := x.Args[0]
11200 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11201 break
11202 }
11203 v.reset(OpAMD64MOVWstore)
11204 v.AuxInt = i
11205 v.Aux = s
11206 v.AddArg3(p0, w, mem)
11207 return true
11208 }
11209
11210
11211
11212 for {
11213 i := v.AuxInt
11214 s := v.Aux
11215 p1 := v_0
11216 if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 {
11217 break
11218 }
11219 w := v_1.Args[0]
11220 x := v_2
11221 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11222 break
11223 }
11224 mem := x.Args[2]
11225 p0 := x.Args[0]
11226 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11227 break
11228 }
11229 v.reset(OpAMD64MOVWstore)
11230 v.AuxInt = i
11231 v.Aux = s
11232 v.AddArg3(p0, w, mem)
11233 return true
11234 }
11235
11236
11237
11238 for {
11239 i := v.AuxInt
11240 s := v.Aux
11241 p1 := v_0
11242 if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 {
11243 break
11244 }
11245 w := v_1.Args[0]
11246 x := v_2
11247 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11248 break
11249 }
11250 mem := x.Args[2]
11251 p0 := x.Args[0]
11252 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11253 break
11254 }
11255 v.reset(OpAMD64MOVWstore)
11256 v.AuxInt = i
11257 v.Aux = s
11258 v.AddArg3(p0, w, mem)
11259 return true
11260 }
11261
11262
11263
11264 for {
11265 i := v.AuxInt
11266 s := v.Aux
11267 p0 := v_0
11268 w := v_1
11269 x := v_2
11270 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11271 break
11272 }
11273 mem := x.Args[2]
11274 p1 := x.Args[0]
11275 x_1 := x.Args[1]
11276 if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11277 break
11278 }
11279 v.reset(OpAMD64MOVWstore)
11280 v.AuxInt = i
11281 v.Aux = s
11282 v.AddArg3(p0, w, mem)
11283 return true
11284 }
11285
11286
11287
11288 for {
11289 i := v.AuxInt
11290 s := v.Aux
11291 p0 := v_0
11292 w := v_1
11293 x := v_2
11294 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11295 break
11296 }
11297 mem := x.Args[2]
11298 p1 := x.Args[0]
11299 x_1 := x.Args[1]
11300 if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11301 break
11302 }
11303 v.reset(OpAMD64MOVWstore)
11304 v.AuxInt = i
11305 v.Aux = s
11306 v.AddArg3(p0, w, mem)
11307 return true
11308 }
11309
11310
11311
11312 for {
11313 i := v.AuxInt
11314 s := v.Aux
11315 p0 := v_0
11316 w := v_1
11317 x := v_2
11318 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11319 break
11320 }
11321 mem := x.Args[2]
11322 p1 := x.Args[0]
11323 x_1 := x.Args[1]
11324 if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11325 break
11326 }
11327 v.reset(OpAMD64MOVWstore)
11328 v.AuxInt = i
11329 v.Aux = s
11330 v.AddArg3(p0, w, mem)
11331 return true
11332 }
11333
11334
11335
11336 for {
11337 i := v.AuxInt
11338 s := v.Aux
11339 p1 := v_0
11340 if v_1.Op != OpAMD64SHRLconst {
11341 break
11342 }
11343 j := v_1.AuxInt
11344 w := v_1.Args[0]
11345 x := v_2
11346 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11347 break
11348 }
11349 mem := x.Args[2]
11350 p0 := x.Args[0]
11351 w0 := x.Args[1]
11352 if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11353 break
11354 }
11355 v.reset(OpAMD64MOVWstore)
11356 v.AuxInt = i
11357 v.Aux = s
11358 v.AddArg3(p0, w0, mem)
11359 return true
11360 }
11361
11362
11363
11364 for {
11365 i := v.AuxInt
11366 s := v.Aux
11367 p1 := v_0
11368 if v_1.Op != OpAMD64SHRQconst {
11369 break
11370 }
11371 j := v_1.AuxInt
11372 w := v_1.Args[0]
11373 x := v_2
11374 if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s {
11375 break
11376 }
11377 mem := x.Args[2]
11378 p0 := x.Args[0]
11379 w0 := x.Args[1]
11380 if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11381 break
11382 }
11383 v.reset(OpAMD64MOVWstore)
11384 v.AuxInt = i
11385 v.Aux = s
11386 v.AddArg3(p0, w0, mem)
11387 return true
11388 }
11389
11390
11391
11392 for {
11393 i := v.AuxInt
11394 s := v.Aux
11395 p := v_0
11396 x1 := v_1
11397 if x1.Op != OpAMD64MOVBload {
11398 break
11399 }
11400 j := x1.AuxInt
11401 s2 := x1.Aux
11402 mem := x1.Args[1]
11403 p2 := x1.Args[0]
11404 mem2 := v_2
11405 if mem2.Op != OpAMD64MOVBstore || mem2.AuxInt != i-1 || mem2.Aux != s {
11406 break
11407 }
11408 _ = mem2.Args[2]
11409 if p != mem2.Args[0] {
11410 break
11411 }
11412 x2 := mem2.Args[1]
11413 if x2.Op != OpAMD64MOVBload || x2.AuxInt != j-1 || x2.Aux != s2 {
11414 break
11415 }
11416 _ = x2.Args[1]
11417 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
11418 break
11419 }
11420 v.reset(OpAMD64MOVWstore)
11421 v.AuxInt = i - 1
11422 v.Aux = s
11423 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
11424 v0.AuxInt = j - 1
11425 v0.Aux = s2
11426 v0.AddArg2(p2, mem)
11427 v.AddArg3(p, v0, mem)
11428 return true
11429 }
11430
11431
11432
11433 for {
11434 off1 := v.AuxInt
11435 sym1 := v.Aux
11436 if v_0.Op != OpAMD64LEAL {
11437 break
11438 }
11439 off2 := v_0.AuxInt
11440 sym2 := v_0.Aux
11441 base := v_0.Args[0]
11442 val := v_1
11443 mem := v_2
11444 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
11445 break
11446 }
11447 v.reset(OpAMD64MOVBstore)
11448 v.AuxInt = off1 + off2
11449 v.Aux = mergeSym(sym1, sym2)
11450 v.AddArg3(base, val, mem)
11451 return true
11452 }
11453
11454
11455
11456 for {
11457 off1 := v.AuxInt
11458 sym := v.Aux
11459 if v_0.Op != OpAMD64ADDLconst {
11460 break
11461 }
11462 off2 := v_0.AuxInt
11463 ptr := v_0.Args[0]
11464 val := v_1
11465 mem := v_2
11466 if !(is32Bit(off1 + off2)) {
11467 break
11468 }
11469 v.reset(OpAMD64MOVBstore)
11470 v.AuxInt = off1 + off2
11471 v.Aux = sym
11472 v.AddArg3(ptr, val, mem)
11473 return true
11474 }
11475 return false
11476 }
11477 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
11478 v_1 := v.Args[1]
11479 v_0 := v.Args[0]
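	// Rules summarized from the loops below (generated comments missing):
	//   (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd(off)
	//       => (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
	//     and the analogous folds for LEAQ, LEAL, and ADDLconst bases.
	//   (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	//       && x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)
	//       => (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
	//     (two adjacent constant byte stores become one 16-bit constant
	//     store; the second combining loop handles the other nesting order)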
11480
11481
11482
11483 for {
11484 sc := v.AuxInt
11485 s := v.Aux
11486 if v_0.Op != OpAMD64ADDQconst {
11487 break
11488 }
11489 off := v_0.AuxInt
11490 ptr := v_0.Args[0]
11491 mem := v_1
11492 if !(ValAndOff(sc).canAdd(off)) {
11493 break
11494 }
11495 v.reset(OpAMD64MOVBstoreconst)
11496 v.AuxInt = ValAndOff(sc).add(off)
11497 v.Aux = s
11498 v.AddArg2(ptr, mem)
11499 return true
11500 }
11501
11502
11503
11504 for {
11505 sc := v.AuxInt
11506 sym1 := v.Aux
11507 if v_0.Op != OpAMD64LEAQ {
11508 break
11509 }
11510 off := v_0.AuxInt
11511 sym2 := v_0.Aux
11512 ptr := v_0.Args[0]
11513 mem := v_1
11514 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
11515 break
11516 }
11517 v.reset(OpAMD64MOVBstoreconst)
11518 v.AuxInt = ValAndOff(sc).add(off)
11519 v.Aux = mergeSym(sym1, sym2)
11520 v.AddArg2(ptr, mem)
11521 return true
11522 }
11523
11524
11525
11526 for {
11527 c := v.AuxInt
11528 s := v.Aux
11529 p := v_0
11530 x := v_1
11531 if x.Op != OpAMD64MOVBstoreconst {
11532 break
11533 }
11534 a := x.AuxInt
11535 if x.Aux != s {
11536 break
11537 }
11538 mem := x.Args[1]
11539 if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
11540 break
11541 }
11542 v.reset(OpAMD64MOVWstoreconst)
11543 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
11544 v.Aux = s
11545 v.AddArg2(p, mem)
11546 return true
11547 }
11548
11549
11550
11551 for {
11552 a := v.AuxInt
11553 s := v.Aux
11554 p := v_0
11555 x := v_1
11556 if x.Op != OpAMD64MOVBstoreconst {
11557 break
11558 }
11559 c := x.AuxInt
11560 if x.Aux != s {
11561 break
11562 }
11563 mem := x.Args[1]
11564 if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
11565 break
11566 }
11567 v.reset(OpAMD64MOVWstoreconst)
11568 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
11569 v.Aux = s
11570 v.AddArg2(p, mem)
11571 return true
11572 }
11573
11574
11575
11576 for {
11577 sc := v.AuxInt
11578 sym1 := v.Aux
11579 if v_0.Op != OpAMD64LEAL {
11580 break
11581 }
11582 off := v_0.AuxInt
11583 sym2 := v_0.Aux
11584 ptr := v_0.Args[0]
11585 mem := v_1
11586 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
11587 break
11588 }
11589 v.reset(OpAMD64MOVBstoreconst)
11590 v.AuxInt = ValAndOff(sc).add(off)
11591 v.Aux = mergeSym(sym1, sym2)
11592 v.AddArg2(ptr, mem)
11593 return true
11594 }
11595
11596
11597
11598 for {
11599 sc := v.AuxInt
11600 s := v.Aux
11601 if v_0.Op != OpAMD64ADDLconst {
11602 break
11603 }
11604 off := v_0.AuxInt
11605 ptr := v_0.Args[0]
11606 mem := v_1
11607 if !(ValAndOff(sc).canAdd(off)) {
11608 break
11609 }
11610 v.reset(OpAMD64MOVBstoreconst)
11611 v.AuxInt = ValAndOff(sc).add(off)
11612 v.Aux = s
11613 v.AddArg2(ptr, mem)
11614 return true
11615 }
11616 return false
11617 }
11618 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
11619 v_0 := v.Args[0]
11620 b := v.Block
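	// Rules summarized from the loops below (generated comments missing):
	//   (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x)
	//       => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	//     and likewise for MOVQload.
	//   (MOVLQSX (ANDLconst [c] x)) && c&0x80000000 == 0 => (ANDLconst [c&0x7fffffff] x)
	//     (bit 31 of the result is clear, so sign extension is a no-op)
	//   (MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
	//   (MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
	//   (MOVLQSX (MOVBQSX x)) => (MOVBQSX x)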
11621
11622
11623
11624 for {
11625 x := v_0
11626 if x.Op != OpAMD64MOVLload {
11627 break
11628 }
11629 off := x.AuxInt
11630 sym := x.Aux
11631 mem := x.Args[1]
11632 ptr := x.Args[0]
11633 if !(x.Uses == 1 && clobber(x)) {
11634 break
11635 }
11636 b = x.Block
11637 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
11638 v.copyOf(v0)
11639 v0.AuxInt = off
11640 v0.Aux = sym
11641 v0.AddArg2(ptr, mem)
11642 return true
11643 }
11644
11645
11646
11647 for {
11648 x := v_0
11649 if x.Op != OpAMD64MOVQload {
11650 break
11651 }
11652 off := x.AuxInt
11653 sym := x.Aux
11654 mem := x.Args[1]
11655 ptr := x.Args[0]
11656 if !(x.Uses == 1 && clobber(x)) {
11657 break
11658 }
11659 b = x.Block
11660 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
11661 v.copyOf(v0)
11662 v0.AuxInt = off
11663 v0.Aux = sym
11664 v0.AddArg2(ptr, mem)
11665 return true
11666 }
11667
11668
11669
11670 for {
11671 if v_0.Op != OpAMD64ANDLconst {
11672 break
11673 }
11674 c := v_0.AuxInt
11675 x := v_0.Args[0]
11676 if !(c&0x80000000 == 0) {
11677 break
11678 }
11679 v.reset(OpAMD64ANDLconst)
11680 v.AuxInt = c & 0x7fffffff
11681 v.AddArg(x)
11682 return true
11683 }
11684
11685
11686 for {
11687 if v_0.Op != OpAMD64MOVLQSX {
11688 break
11689 }
11690 x := v_0.Args[0]
11691 v.reset(OpAMD64MOVLQSX)
11692 v.AddArg(x)
11693 return true
11694 }
11695
11696
11697 for {
11698 if v_0.Op != OpAMD64MOVWQSX {
11699 break
11700 }
11701 x := v_0.Args[0]
11702 v.reset(OpAMD64MOVWQSX)
11703 v.AddArg(x)
11704 return true
11705 }
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
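	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)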
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
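	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1, sym2)} base mem)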
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
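	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)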
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg2(ptr, mem)
		return true
	}
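	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)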
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := x.AuxInt
		sym := x.Aux
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = off
		v0.Aux = sym
		v0.AddArg2(ptr, mem)
		return true
	}
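	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x,3)
	// result: x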
	for {
		x := v_0
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
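	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)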
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
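	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)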
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
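	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)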
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
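	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)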
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
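	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)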
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
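	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)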
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
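	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})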
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
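	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})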
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := v_0.AuxInt
		sym := v_0.Aux
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = off
		v0.Aux = sym
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
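	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)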
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
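	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)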
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
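	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1, sym2)} base mem)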
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(base, mem)
		return true
	}
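	// match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1, sym2)} base mem)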
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(base, mem)
		return true
	}
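	// match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLload [off1+off2] {sym} ptr mem)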
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
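	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)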
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
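	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))])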
	for {
		off := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
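	// match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)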
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
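	// match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
	// result: (MOVLstore [off] {sym} ptr x mem)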
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
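	// match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)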
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg3(ptr, val, mem)
		return true
	}
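	// match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)), off)] {sym} ptr mem)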
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		mem := v_2
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
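	// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validOff(off)
	// result: (MOVLstoreconst [makeValAndOff(int64(int32(c)), off)] {sym} ptr mem)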
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		mem := v_2
		if !(validOff(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = makeValAndOff(int64(int32(c)), off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
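	// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)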
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg3(base, val, mem)
		return true
	}
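	// match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w mem)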
	for {
		i := v.AuxInt
		s := v.Aux
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg3(p, w, mem)
		return true
	}
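	// match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstore [i-4] {s} p w0 mem)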
	for {
		i := v.AuxInt
		s := v.Aux
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v.AddArg3(p, w0, mem)
		return true
	}
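	// match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w mem)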
	for {
		i := v.AuxInt
		s := v.Aux
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || x.AuxInt != i || x.Aux != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i
		v.Aux = s
		v.AddArg3(p0, w, mem)
		return true
	}
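	// match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
	// result: (MOVQstore [i] {s} p0 w0 mem)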
	for {
		i := v.AuxInt
		s := v.Aux
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := v_1.AuxInt
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVLstore || x.AuxInt != i || x.Aux != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i
		v.Aux = s
		v.AddArg3(p0, w0, mem)
		return true
	}
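	// match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)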
	for {
		i := v.AuxInt
		s := v.Aux
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVLload {
			break
		}
		j := x1.AuxInt
		s2 := x1.Aux
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVLstore || mem2.AuxInt != i-4 || mem2.Aux != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVLload || x2.AuxInt != j-4 || x2.Aux != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = i - 4
		v.Aux = s
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = j - 4
		v0.Aux = s2
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
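	// match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)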
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg3(base, val, mem)
		return true
	}
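	// match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVLstore [off1+off2] {sym} ptr val mem)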
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg3(ptr, val, mem)
		return true
	}
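	// match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ADDLmodify [off] {sym} ptr x mem)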
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
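	// match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ANDLmodify [off] {sym} ptr x mem)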
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
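	// match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (ORLmodify [off] {sym} ptr x mem)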
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
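	// match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
	// cond: y.Uses==1 && clobber(y)
	// result: (XORLmodify [off] {sym} ptr x mem)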
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORLload || y.AuxInt != off || y.Aux != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
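	// match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ADDLmodify [off] {sym} ptr x mem)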
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDLmodify)
			v.AuxInt = off
			v.Aux = sym
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
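	// match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (SUBLmodify [off] {sym} ptr x mem)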
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
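	// match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ANDLmodify [off] {sym} ptr x mem)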
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDLmodify)
			v.AuxInt = off
			v.Aux = sym
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
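	// match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (ORLmodify [off] {sym} ptr x mem)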
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORLmodify)
			v.AuxInt = off
			v.Aux = sym
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
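	// match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (XORLmodify [off] {sym} ptr x mem)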
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORL {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORLmodify)
			v.AuxInt = off
			v.Aux = sym
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
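	// match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (BTCLmodify [off] {sym} ptr x mem)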
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64BTCL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64BTCLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
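	// match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (BTRLmodify [off] {sym} ptr x mem)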
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64BTRL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64BTRLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
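	// match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
	// result: (BTSLmodify [off] {sym} ptr x mem)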
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64BTSL {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64BTSLmodify)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, x, mem)
		return true
	}
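	// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)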
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
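	// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)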
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
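	// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)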
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
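	// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)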
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
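	// match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)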
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64BTCLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64BTCLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
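	// match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)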
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64BTRLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64BTRLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
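	// match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a)
	// result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)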
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64BTSLconst {
			break
		}
		c := a.AuxInt
		l := a.Args[0]
		if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64BTSLconstmodify)
		v.AuxInt = makeValAndOff(c, off)
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
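	// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
	// result: (MOVSSstore [off] {sym} ptr val mem)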
	for {
		off := v.AuxInt
		sym := v.Aux
		ptr := v_0
		if v_1.Op != OpAMD64MOVLf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
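	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)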
	for {
		sc := v.AuxInt
		s := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg2(ptr, mem)
		return true
	}
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(ptr, mem)
		return true
	}
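	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)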
	for {
		c := v.AuxInt
		s := v.Aux
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg3(p, v0, mem)
		return true
	}
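	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)
	// result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)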
	for {
		a := v.AuxInt
		s := v.Aux
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := x.AuxInt
		if x.Aux != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = ValAndOff(a).Off()
		v.Aux = s
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
		v.AddArg3(p, v0, mem)
		return true
	}
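	// match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)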
	for {
		sc := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAL {
			break
		}
		off := v_0.AuxInt
		sym2 := v_0.Aux
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(ptr, mem)
		return true
	}
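	// match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)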
	for {
		sc := v.AuxInt
		s := v.Aux
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		off := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = ValAndOff(sc).add(off)
		v.Aux = s
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
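	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)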
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg2(ptr, mem)
		return true
	}
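	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1, sym2)} base mem)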
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
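	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)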
	for {
		off1 := v.AuxInt
		sym := v.Aux
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg3(ptr, val, mem)
		return true
	}
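	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)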
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = off1 + off2