1
2
3
4 package ssa
5
6 import "internal/buildcfg"
7 import "math"
8 import "cmd/internal/obj"
9 import "cmd/compile/internal/types"
10
11 func rewriteValueAMD64(v *Value) bool {
12 switch v.Op {
13 case OpAMD64ADCQ:
14 return rewriteValueAMD64_OpAMD64ADCQ(v)
15 case OpAMD64ADCQconst:
16 return rewriteValueAMD64_OpAMD64ADCQconst(v)
17 case OpAMD64ADDL:
18 return rewriteValueAMD64_OpAMD64ADDL(v)
19 case OpAMD64ADDLconst:
20 return rewriteValueAMD64_OpAMD64ADDLconst(v)
21 case OpAMD64ADDLconstmodify:
22 return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
23 case OpAMD64ADDLload:
24 return rewriteValueAMD64_OpAMD64ADDLload(v)
25 case OpAMD64ADDLmodify:
26 return rewriteValueAMD64_OpAMD64ADDLmodify(v)
27 case OpAMD64ADDQ:
28 return rewriteValueAMD64_OpAMD64ADDQ(v)
29 case OpAMD64ADDQcarry:
30 return rewriteValueAMD64_OpAMD64ADDQcarry(v)
31 case OpAMD64ADDQconst:
32 return rewriteValueAMD64_OpAMD64ADDQconst(v)
33 case OpAMD64ADDQconstmodify:
34 return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
35 case OpAMD64ADDQload:
36 return rewriteValueAMD64_OpAMD64ADDQload(v)
37 case OpAMD64ADDQmodify:
38 return rewriteValueAMD64_OpAMD64ADDQmodify(v)
39 case OpAMD64ADDSD:
40 return rewriteValueAMD64_OpAMD64ADDSD(v)
41 case OpAMD64ADDSDload:
42 return rewriteValueAMD64_OpAMD64ADDSDload(v)
43 case OpAMD64ADDSS:
44 return rewriteValueAMD64_OpAMD64ADDSS(v)
45 case OpAMD64ADDSSload:
46 return rewriteValueAMD64_OpAMD64ADDSSload(v)
47 case OpAMD64ANDL:
48 return rewriteValueAMD64_OpAMD64ANDL(v)
49 case OpAMD64ANDLconst:
50 return rewriteValueAMD64_OpAMD64ANDLconst(v)
51 case OpAMD64ANDLconstmodify:
52 return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
53 case OpAMD64ANDLload:
54 return rewriteValueAMD64_OpAMD64ANDLload(v)
55 case OpAMD64ANDLmodify:
56 return rewriteValueAMD64_OpAMD64ANDLmodify(v)
57 case OpAMD64ANDNL:
58 return rewriteValueAMD64_OpAMD64ANDNL(v)
59 case OpAMD64ANDNQ:
60 return rewriteValueAMD64_OpAMD64ANDNQ(v)
61 case OpAMD64ANDQ:
62 return rewriteValueAMD64_OpAMD64ANDQ(v)
63 case OpAMD64ANDQconst:
64 return rewriteValueAMD64_OpAMD64ANDQconst(v)
65 case OpAMD64ANDQconstmodify:
66 return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
67 case OpAMD64ANDQload:
68 return rewriteValueAMD64_OpAMD64ANDQload(v)
69 case OpAMD64ANDQmodify:
70 return rewriteValueAMD64_OpAMD64ANDQmodify(v)
71 case OpAMD64BSFQ:
72 return rewriteValueAMD64_OpAMD64BSFQ(v)
73 case OpAMD64BSWAPL:
74 return rewriteValueAMD64_OpAMD64BSWAPL(v)
75 case OpAMD64BSWAPQ:
76 return rewriteValueAMD64_OpAMD64BSWAPQ(v)
77 case OpAMD64BTCLconst:
78 return rewriteValueAMD64_OpAMD64BTCLconst(v)
79 case OpAMD64BTCQconst:
80 return rewriteValueAMD64_OpAMD64BTCQconst(v)
81 case OpAMD64BTLconst:
82 return rewriteValueAMD64_OpAMD64BTLconst(v)
83 case OpAMD64BTQconst:
84 return rewriteValueAMD64_OpAMD64BTQconst(v)
85 case OpAMD64BTRLconst:
86 return rewriteValueAMD64_OpAMD64BTRLconst(v)
87 case OpAMD64BTRQconst:
88 return rewriteValueAMD64_OpAMD64BTRQconst(v)
89 case OpAMD64BTSLconst:
90 return rewriteValueAMD64_OpAMD64BTSLconst(v)
91 case OpAMD64BTSQconst:
92 return rewriteValueAMD64_OpAMD64BTSQconst(v)
93 case OpAMD64CMOVLCC:
94 return rewriteValueAMD64_OpAMD64CMOVLCC(v)
95 case OpAMD64CMOVLCS:
96 return rewriteValueAMD64_OpAMD64CMOVLCS(v)
97 case OpAMD64CMOVLEQ:
98 return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
99 case OpAMD64CMOVLGE:
100 return rewriteValueAMD64_OpAMD64CMOVLGE(v)
101 case OpAMD64CMOVLGT:
102 return rewriteValueAMD64_OpAMD64CMOVLGT(v)
103 case OpAMD64CMOVLHI:
104 return rewriteValueAMD64_OpAMD64CMOVLHI(v)
105 case OpAMD64CMOVLLE:
106 return rewriteValueAMD64_OpAMD64CMOVLLE(v)
107 case OpAMD64CMOVLLS:
108 return rewriteValueAMD64_OpAMD64CMOVLLS(v)
109 case OpAMD64CMOVLLT:
110 return rewriteValueAMD64_OpAMD64CMOVLLT(v)
111 case OpAMD64CMOVLNE:
112 return rewriteValueAMD64_OpAMD64CMOVLNE(v)
113 case OpAMD64CMOVQCC:
114 return rewriteValueAMD64_OpAMD64CMOVQCC(v)
115 case OpAMD64CMOVQCS:
116 return rewriteValueAMD64_OpAMD64CMOVQCS(v)
117 case OpAMD64CMOVQEQ:
118 return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
119 case OpAMD64CMOVQGE:
120 return rewriteValueAMD64_OpAMD64CMOVQGE(v)
121 case OpAMD64CMOVQGT:
122 return rewriteValueAMD64_OpAMD64CMOVQGT(v)
123 case OpAMD64CMOVQHI:
124 return rewriteValueAMD64_OpAMD64CMOVQHI(v)
125 case OpAMD64CMOVQLE:
126 return rewriteValueAMD64_OpAMD64CMOVQLE(v)
127 case OpAMD64CMOVQLS:
128 return rewriteValueAMD64_OpAMD64CMOVQLS(v)
129 case OpAMD64CMOVQLT:
130 return rewriteValueAMD64_OpAMD64CMOVQLT(v)
131 case OpAMD64CMOVQNE:
132 return rewriteValueAMD64_OpAMD64CMOVQNE(v)
133 case OpAMD64CMOVWCC:
134 return rewriteValueAMD64_OpAMD64CMOVWCC(v)
135 case OpAMD64CMOVWCS:
136 return rewriteValueAMD64_OpAMD64CMOVWCS(v)
137 case OpAMD64CMOVWEQ:
138 return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
139 case OpAMD64CMOVWGE:
140 return rewriteValueAMD64_OpAMD64CMOVWGE(v)
141 case OpAMD64CMOVWGT:
142 return rewriteValueAMD64_OpAMD64CMOVWGT(v)
143 case OpAMD64CMOVWHI:
144 return rewriteValueAMD64_OpAMD64CMOVWHI(v)
145 case OpAMD64CMOVWLE:
146 return rewriteValueAMD64_OpAMD64CMOVWLE(v)
147 case OpAMD64CMOVWLS:
148 return rewriteValueAMD64_OpAMD64CMOVWLS(v)
149 case OpAMD64CMOVWLT:
150 return rewriteValueAMD64_OpAMD64CMOVWLT(v)
151 case OpAMD64CMOVWNE:
152 return rewriteValueAMD64_OpAMD64CMOVWNE(v)
153 case OpAMD64CMPB:
154 return rewriteValueAMD64_OpAMD64CMPB(v)
155 case OpAMD64CMPBconst:
156 return rewriteValueAMD64_OpAMD64CMPBconst(v)
157 case OpAMD64CMPBconstload:
158 return rewriteValueAMD64_OpAMD64CMPBconstload(v)
159 case OpAMD64CMPBload:
160 return rewriteValueAMD64_OpAMD64CMPBload(v)
161 case OpAMD64CMPL:
162 return rewriteValueAMD64_OpAMD64CMPL(v)
163 case OpAMD64CMPLconst:
164 return rewriteValueAMD64_OpAMD64CMPLconst(v)
165 case OpAMD64CMPLconstload:
166 return rewriteValueAMD64_OpAMD64CMPLconstload(v)
167 case OpAMD64CMPLload:
168 return rewriteValueAMD64_OpAMD64CMPLload(v)
169 case OpAMD64CMPQ:
170 return rewriteValueAMD64_OpAMD64CMPQ(v)
171 case OpAMD64CMPQconst:
172 return rewriteValueAMD64_OpAMD64CMPQconst(v)
173 case OpAMD64CMPQconstload:
174 return rewriteValueAMD64_OpAMD64CMPQconstload(v)
175 case OpAMD64CMPQload:
176 return rewriteValueAMD64_OpAMD64CMPQload(v)
177 case OpAMD64CMPW:
178 return rewriteValueAMD64_OpAMD64CMPW(v)
179 case OpAMD64CMPWconst:
180 return rewriteValueAMD64_OpAMD64CMPWconst(v)
181 case OpAMD64CMPWconstload:
182 return rewriteValueAMD64_OpAMD64CMPWconstload(v)
183 case OpAMD64CMPWload:
184 return rewriteValueAMD64_OpAMD64CMPWload(v)
185 case OpAMD64CMPXCHGLlock:
186 return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
187 case OpAMD64CMPXCHGQlock:
188 return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
189 case OpAMD64DIVSD:
190 return rewriteValueAMD64_OpAMD64DIVSD(v)
191 case OpAMD64DIVSDload:
192 return rewriteValueAMD64_OpAMD64DIVSDload(v)
193 case OpAMD64DIVSS:
194 return rewriteValueAMD64_OpAMD64DIVSS(v)
195 case OpAMD64DIVSSload:
196 return rewriteValueAMD64_OpAMD64DIVSSload(v)
197 case OpAMD64HMULL:
198 return rewriteValueAMD64_OpAMD64HMULL(v)
199 case OpAMD64HMULLU:
200 return rewriteValueAMD64_OpAMD64HMULLU(v)
201 case OpAMD64HMULQ:
202 return rewriteValueAMD64_OpAMD64HMULQ(v)
203 case OpAMD64HMULQU:
204 return rewriteValueAMD64_OpAMD64HMULQU(v)
205 case OpAMD64LEAL:
206 return rewriteValueAMD64_OpAMD64LEAL(v)
207 case OpAMD64LEAL1:
208 return rewriteValueAMD64_OpAMD64LEAL1(v)
209 case OpAMD64LEAL2:
210 return rewriteValueAMD64_OpAMD64LEAL2(v)
211 case OpAMD64LEAL4:
212 return rewriteValueAMD64_OpAMD64LEAL4(v)
213 case OpAMD64LEAL8:
214 return rewriteValueAMD64_OpAMD64LEAL8(v)
215 case OpAMD64LEAQ:
216 return rewriteValueAMD64_OpAMD64LEAQ(v)
217 case OpAMD64LEAQ1:
218 return rewriteValueAMD64_OpAMD64LEAQ1(v)
219 case OpAMD64LEAQ2:
220 return rewriteValueAMD64_OpAMD64LEAQ2(v)
221 case OpAMD64LEAQ4:
222 return rewriteValueAMD64_OpAMD64LEAQ4(v)
223 case OpAMD64LEAQ8:
224 return rewriteValueAMD64_OpAMD64LEAQ8(v)
225 case OpAMD64MOVBELstore:
226 return rewriteValueAMD64_OpAMD64MOVBELstore(v)
227 case OpAMD64MOVBEQstore:
228 return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
229 case OpAMD64MOVBQSX:
230 return rewriteValueAMD64_OpAMD64MOVBQSX(v)
231 case OpAMD64MOVBQSXload:
232 return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
233 case OpAMD64MOVBQZX:
234 return rewriteValueAMD64_OpAMD64MOVBQZX(v)
235 case OpAMD64MOVBatomicload:
236 return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
237 case OpAMD64MOVBload:
238 return rewriteValueAMD64_OpAMD64MOVBload(v)
239 case OpAMD64MOVBstore:
240 return rewriteValueAMD64_OpAMD64MOVBstore(v)
241 case OpAMD64MOVBstoreconst:
242 return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
243 case OpAMD64MOVLQSX:
244 return rewriteValueAMD64_OpAMD64MOVLQSX(v)
245 case OpAMD64MOVLQSXload:
246 return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
247 case OpAMD64MOVLQZX:
248 return rewriteValueAMD64_OpAMD64MOVLQZX(v)
249 case OpAMD64MOVLatomicload:
250 return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
251 case OpAMD64MOVLf2i:
252 return rewriteValueAMD64_OpAMD64MOVLf2i(v)
253 case OpAMD64MOVLi2f:
254 return rewriteValueAMD64_OpAMD64MOVLi2f(v)
255 case OpAMD64MOVLload:
256 return rewriteValueAMD64_OpAMD64MOVLload(v)
257 case OpAMD64MOVLstore:
258 return rewriteValueAMD64_OpAMD64MOVLstore(v)
259 case OpAMD64MOVLstoreconst:
260 return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
261 case OpAMD64MOVOload:
262 return rewriteValueAMD64_OpAMD64MOVOload(v)
263 case OpAMD64MOVOstore:
264 return rewriteValueAMD64_OpAMD64MOVOstore(v)
265 case OpAMD64MOVOstoreconst:
266 return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
267 case OpAMD64MOVQatomicload:
268 return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
269 case OpAMD64MOVQf2i:
270 return rewriteValueAMD64_OpAMD64MOVQf2i(v)
271 case OpAMD64MOVQi2f:
272 return rewriteValueAMD64_OpAMD64MOVQi2f(v)
273 case OpAMD64MOVQload:
274 return rewriteValueAMD64_OpAMD64MOVQload(v)
275 case OpAMD64MOVQstore:
276 return rewriteValueAMD64_OpAMD64MOVQstore(v)
277 case OpAMD64MOVQstoreconst:
278 return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
279 case OpAMD64MOVSDload:
280 return rewriteValueAMD64_OpAMD64MOVSDload(v)
281 case OpAMD64MOVSDstore:
282 return rewriteValueAMD64_OpAMD64MOVSDstore(v)
283 case OpAMD64MOVSSload:
284 return rewriteValueAMD64_OpAMD64MOVSSload(v)
285 case OpAMD64MOVSSstore:
286 return rewriteValueAMD64_OpAMD64MOVSSstore(v)
287 case OpAMD64MOVWQSX:
288 return rewriteValueAMD64_OpAMD64MOVWQSX(v)
289 case OpAMD64MOVWQSXload:
290 return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
291 case OpAMD64MOVWQZX:
292 return rewriteValueAMD64_OpAMD64MOVWQZX(v)
293 case OpAMD64MOVWload:
294 return rewriteValueAMD64_OpAMD64MOVWload(v)
295 case OpAMD64MOVWstore:
296 return rewriteValueAMD64_OpAMD64MOVWstore(v)
297 case OpAMD64MOVWstoreconst:
298 return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
299 case OpAMD64MULL:
300 return rewriteValueAMD64_OpAMD64MULL(v)
301 case OpAMD64MULLconst:
302 return rewriteValueAMD64_OpAMD64MULLconst(v)
303 case OpAMD64MULQ:
304 return rewriteValueAMD64_OpAMD64MULQ(v)
305 case OpAMD64MULQconst:
306 return rewriteValueAMD64_OpAMD64MULQconst(v)
307 case OpAMD64MULSD:
308 return rewriteValueAMD64_OpAMD64MULSD(v)
309 case OpAMD64MULSDload:
310 return rewriteValueAMD64_OpAMD64MULSDload(v)
311 case OpAMD64MULSS:
312 return rewriteValueAMD64_OpAMD64MULSS(v)
313 case OpAMD64MULSSload:
314 return rewriteValueAMD64_OpAMD64MULSSload(v)
315 case OpAMD64NEGL:
316 return rewriteValueAMD64_OpAMD64NEGL(v)
317 case OpAMD64NEGQ:
318 return rewriteValueAMD64_OpAMD64NEGQ(v)
319 case OpAMD64NOTL:
320 return rewriteValueAMD64_OpAMD64NOTL(v)
321 case OpAMD64NOTQ:
322 return rewriteValueAMD64_OpAMD64NOTQ(v)
323 case OpAMD64ORL:
324 return rewriteValueAMD64_OpAMD64ORL(v)
325 case OpAMD64ORLconst:
326 return rewriteValueAMD64_OpAMD64ORLconst(v)
327 case OpAMD64ORLconstmodify:
328 return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
329 case OpAMD64ORLload:
330 return rewriteValueAMD64_OpAMD64ORLload(v)
331 case OpAMD64ORLmodify:
332 return rewriteValueAMD64_OpAMD64ORLmodify(v)
333 case OpAMD64ORQ:
334 return rewriteValueAMD64_OpAMD64ORQ(v)
335 case OpAMD64ORQconst:
336 return rewriteValueAMD64_OpAMD64ORQconst(v)
337 case OpAMD64ORQconstmodify:
338 return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
339 case OpAMD64ORQload:
340 return rewriteValueAMD64_OpAMD64ORQload(v)
341 case OpAMD64ORQmodify:
342 return rewriteValueAMD64_OpAMD64ORQmodify(v)
343 case OpAMD64ROLB:
344 return rewriteValueAMD64_OpAMD64ROLB(v)
345 case OpAMD64ROLBconst:
346 return rewriteValueAMD64_OpAMD64ROLBconst(v)
347 case OpAMD64ROLL:
348 return rewriteValueAMD64_OpAMD64ROLL(v)
349 case OpAMD64ROLLconst:
350 return rewriteValueAMD64_OpAMD64ROLLconst(v)
351 case OpAMD64ROLQ:
352 return rewriteValueAMD64_OpAMD64ROLQ(v)
353 case OpAMD64ROLQconst:
354 return rewriteValueAMD64_OpAMD64ROLQconst(v)
355 case OpAMD64ROLW:
356 return rewriteValueAMD64_OpAMD64ROLW(v)
357 case OpAMD64ROLWconst:
358 return rewriteValueAMD64_OpAMD64ROLWconst(v)
359 case OpAMD64RORB:
360 return rewriteValueAMD64_OpAMD64RORB(v)
361 case OpAMD64RORL:
362 return rewriteValueAMD64_OpAMD64RORL(v)
363 case OpAMD64RORQ:
364 return rewriteValueAMD64_OpAMD64RORQ(v)
365 case OpAMD64RORW:
366 return rewriteValueAMD64_OpAMD64RORW(v)
367 case OpAMD64SARB:
368 return rewriteValueAMD64_OpAMD64SARB(v)
369 case OpAMD64SARBconst:
370 return rewriteValueAMD64_OpAMD64SARBconst(v)
371 case OpAMD64SARL:
372 return rewriteValueAMD64_OpAMD64SARL(v)
373 case OpAMD64SARLconst:
374 return rewriteValueAMD64_OpAMD64SARLconst(v)
375 case OpAMD64SARQ:
376 return rewriteValueAMD64_OpAMD64SARQ(v)
377 case OpAMD64SARQconst:
378 return rewriteValueAMD64_OpAMD64SARQconst(v)
379 case OpAMD64SARW:
380 return rewriteValueAMD64_OpAMD64SARW(v)
381 case OpAMD64SARWconst:
382 return rewriteValueAMD64_OpAMD64SARWconst(v)
383 case OpAMD64SBBLcarrymask:
384 return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
385 case OpAMD64SBBQ:
386 return rewriteValueAMD64_OpAMD64SBBQ(v)
387 case OpAMD64SBBQcarrymask:
388 return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
389 case OpAMD64SBBQconst:
390 return rewriteValueAMD64_OpAMD64SBBQconst(v)
391 case OpAMD64SETA:
392 return rewriteValueAMD64_OpAMD64SETA(v)
393 case OpAMD64SETAE:
394 return rewriteValueAMD64_OpAMD64SETAE(v)
395 case OpAMD64SETAEstore:
396 return rewriteValueAMD64_OpAMD64SETAEstore(v)
397 case OpAMD64SETAstore:
398 return rewriteValueAMD64_OpAMD64SETAstore(v)
399 case OpAMD64SETB:
400 return rewriteValueAMD64_OpAMD64SETB(v)
401 case OpAMD64SETBE:
402 return rewriteValueAMD64_OpAMD64SETBE(v)
403 case OpAMD64SETBEstore:
404 return rewriteValueAMD64_OpAMD64SETBEstore(v)
405 case OpAMD64SETBstore:
406 return rewriteValueAMD64_OpAMD64SETBstore(v)
407 case OpAMD64SETEQ:
408 return rewriteValueAMD64_OpAMD64SETEQ(v)
409 case OpAMD64SETEQstore:
410 return rewriteValueAMD64_OpAMD64SETEQstore(v)
411 case OpAMD64SETG:
412 return rewriteValueAMD64_OpAMD64SETG(v)
413 case OpAMD64SETGE:
414 return rewriteValueAMD64_OpAMD64SETGE(v)
415 case OpAMD64SETGEstore:
416 return rewriteValueAMD64_OpAMD64SETGEstore(v)
417 case OpAMD64SETGstore:
418 return rewriteValueAMD64_OpAMD64SETGstore(v)
419 case OpAMD64SETL:
420 return rewriteValueAMD64_OpAMD64SETL(v)
421 case OpAMD64SETLE:
422 return rewriteValueAMD64_OpAMD64SETLE(v)
423 case OpAMD64SETLEstore:
424 return rewriteValueAMD64_OpAMD64SETLEstore(v)
425 case OpAMD64SETLstore:
426 return rewriteValueAMD64_OpAMD64SETLstore(v)
427 case OpAMD64SETNE:
428 return rewriteValueAMD64_OpAMD64SETNE(v)
429 case OpAMD64SETNEstore:
430 return rewriteValueAMD64_OpAMD64SETNEstore(v)
431 case OpAMD64SHLL:
432 return rewriteValueAMD64_OpAMD64SHLL(v)
433 case OpAMD64SHLLconst:
434 return rewriteValueAMD64_OpAMD64SHLLconst(v)
435 case OpAMD64SHLQ:
436 return rewriteValueAMD64_OpAMD64SHLQ(v)
437 case OpAMD64SHLQconst:
438 return rewriteValueAMD64_OpAMD64SHLQconst(v)
439 case OpAMD64SHRB:
440 return rewriteValueAMD64_OpAMD64SHRB(v)
441 case OpAMD64SHRBconst:
442 return rewriteValueAMD64_OpAMD64SHRBconst(v)
443 case OpAMD64SHRL:
444 return rewriteValueAMD64_OpAMD64SHRL(v)
445 case OpAMD64SHRLconst:
446 return rewriteValueAMD64_OpAMD64SHRLconst(v)
447 case OpAMD64SHRQ:
448 return rewriteValueAMD64_OpAMD64SHRQ(v)
449 case OpAMD64SHRQconst:
450 return rewriteValueAMD64_OpAMD64SHRQconst(v)
451 case OpAMD64SHRW:
452 return rewriteValueAMD64_OpAMD64SHRW(v)
453 case OpAMD64SHRWconst:
454 return rewriteValueAMD64_OpAMD64SHRWconst(v)
455 case OpAMD64SUBL:
456 return rewriteValueAMD64_OpAMD64SUBL(v)
457 case OpAMD64SUBLconst:
458 return rewriteValueAMD64_OpAMD64SUBLconst(v)
459 case OpAMD64SUBLload:
460 return rewriteValueAMD64_OpAMD64SUBLload(v)
461 case OpAMD64SUBLmodify:
462 return rewriteValueAMD64_OpAMD64SUBLmodify(v)
463 case OpAMD64SUBQ:
464 return rewriteValueAMD64_OpAMD64SUBQ(v)
465 case OpAMD64SUBQborrow:
466 return rewriteValueAMD64_OpAMD64SUBQborrow(v)
467 case OpAMD64SUBQconst:
468 return rewriteValueAMD64_OpAMD64SUBQconst(v)
469 case OpAMD64SUBQload:
470 return rewriteValueAMD64_OpAMD64SUBQload(v)
471 case OpAMD64SUBQmodify:
472 return rewriteValueAMD64_OpAMD64SUBQmodify(v)
473 case OpAMD64SUBSD:
474 return rewriteValueAMD64_OpAMD64SUBSD(v)
475 case OpAMD64SUBSDload:
476 return rewriteValueAMD64_OpAMD64SUBSDload(v)
477 case OpAMD64SUBSS:
478 return rewriteValueAMD64_OpAMD64SUBSS(v)
479 case OpAMD64SUBSSload:
480 return rewriteValueAMD64_OpAMD64SUBSSload(v)
481 case OpAMD64TESTB:
482 return rewriteValueAMD64_OpAMD64TESTB(v)
483 case OpAMD64TESTBconst:
484 return rewriteValueAMD64_OpAMD64TESTBconst(v)
485 case OpAMD64TESTL:
486 return rewriteValueAMD64_OpAMD64TESTL(v)
487 case OpAMD64TESTLconst:
488 return rewriteValueAMD64_OpAMD64TESTLconst(v)
489 case OpAMD64TESTQ:
490 return rewriteValueAMD64_OpAMD64TESTQ(v)
491 case OpAMD64TESTQconst:
492 return rewriteValueAMD64_OpAMD64TESTQconst(v)
493 case OpAMD64TESTW:
494 return rewriteValueAMD64_OpAMD64TESTW(v)
495 case OpAMD64TESTWconst:
496 return rewriteValueAMD64_OpAMD64TESTWconst(v)
497 case OpAMD64XADDLlock:
498 return rewriteValueAMD64_OpAMD64XADDLlock(v)
499 case OpAMD64XADDQlock:
500 return rewriteValueAMD64_OpAMD64XADDQlock(v)
501 case OpAMD64XCHGL:
502 return rewriteValueAMD64_OpAMD64XCHGL(v)
503 case OpAMD64XCHGQ:
504 return rewriteValueAMD64_OpAMD64XCHGQ(v)
505 case OpAMD64XORL:
506 return rewriteValueAMD64_OpAMD64XORL(v)
507 case OpAMD64XORLconst:
508 return rewriteValueAMD64_OpAMD64XORLconst(v)
509 case OpAMD64XORLconstmodify:
510 return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
511 case OpAMD64XORLload:
512 return rewriteValueAMD64_OpAMD64XORLload(v)
513 case OpAMD64XORLmodify:
514 return rewriteValueAMD64_OpAMD64XORLmodify(v)
515 case OpAMD64XORQ:
516 return rewriteValueAMD64_OpAMD64XORQ(v)
517 case OpAMD64XORQconst:
518 return rewriteValueAMD64_OpAMD64XORQconst(v)
519 case OpAMD64XORQconstmodify:
520 return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
521 case OpAMD64XORQload:
522 return rewriteValueAMD64_OpAMD64XORQload(v)
523 case OpAMD64XORQmodify:
524 return rewriteValueAMD64_OpAMD64XORQmodify(v)
525 case OpAdd16:
526 v.Op = OpAMD64ADDL
527 return true
528 case OpAdd32:
529 v.Op = OpAMD64ADDL
530 return true
531 case OpAdd32F:
532 v.Op = OpAMD64ADDSS
533 return true
534 case OpAdd64:
535 v.Op = OpAMD64ADDQ
536 return true
537 case OpAdd64F:
538 v.Op = OpAMD64ADDSD
539 return true
540 case OpAdd8:
541 v.Op = OpAMD64ADDL
542 return true
543 case OpAddPtr:
544 v.Op = OpAMD64ADDQ
545 return true
546 case OpAddr:
547 return rewriteValueAMD64_OpAddr(v)
548 case OpAnd16:
549 v.Op = OpAMD64ANDL
550 return true
551 case OpAnd32:
552 v.Op = OpAMD64ANDL
553 return true
554 case OpAnd64:
555 v.Op = OpAMD64ANDQ
556 return true
557 case OpAnd8:
558 v.Op = OpAMD64ANDL
559 return true
560 case OpAndB:
561 v.Op = OpAMD64ANDL
562 return true
563 case OpAtomicAdd32:
564 return rewriteValueAMD64_OpAtomicAdd32(v)
565 case OpAtomicAdd64:
566 return rewriteValueAMD64_OpAtomicAdd64(v)
567 case OpAtomicAnd32:
568 return rewriteValueAMD64_OpAtomicAnd32(v)
569 case OpAtomicAnd8:
570 return rewriteValueAMD64_OpAtomicAnd8(v)
571 case OpAtomicCompareAndSwap32:
572 return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
573 case OpAtomicCompareAndSwap64:
574 return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
575 case OpAtomicExchange32:
576 return rewriteValueAMD64_OpAtomicExchange32(v)
577 case OpAtomicExchange64:
578 return rewriteValueAMD64_OpAtomicExchange64(v)
579 case OpAtomicLoad32:
580 return rewriteValueAMD64_OpAtomicLoad32(v)
581 case OpAtomicLoad64:
582 return rewriteValueAMD64_OpAtomicLoad64(v)
583 case OpAtomicLoad8:
584 return rewriteValueAMD64_OpAtomicLoad8(v)
585 case OpAtomicLoadPtr:
586 return rewriteValueAMD64_OpAtomicLoadPtr(v)
587 case OpAtomicOr32:
588 return rewriteValueAMD64_OpAtomicOr32(v)
589 case OpAtomicOr8:
590 return rewriteValueAMD64_OpAtomicOr8(v)
591 case OpAtomicStore32:
592 return rewriteValueAMD64_OpAtomicStore32(v)
593 case OpAtomicStore64:
594 return rewriteValueAMD64_OpAtomicStore64(v)
595 case OpAtomicStore8:
596 return rewriteValueAMD64_OpAtomicStore8(v)
597 case OpAtomicStorePtrNoWB:
598 return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
599 case OpAvg64u:
600 v.Op = OpAMD64AVGQU
601 return true
602 case OpBitLen16:
603 return rewriteValueAMD64_OpBitLen16(v)
604 case OpBitLen32:
605 return rewriteValueAMD64_OpBitLen32(v)
606 case OpBitLen64:
607 return rewriteValueAMD64_OpBitLen64(v)
608 case OpBitLen8:
609 return rewriteValueAMD64_OpBitLen8(v)
610 case OpBswap32:
611 v.Op = OpAMD64BSWAPL
612 return true
613 case OpBswap64:
614 v.Op = OpAMD64BSWAPQ
615 return true
616 case OpCeil:
617 return rewriteValueAMD64_OpCeil(v)
618 case OpClosureCall:
619 v.Op = OpAMD64CALLclosure
620 return true
621 case OpCom16:
622 v.Op = OpAMD64NOTL
623 return true
624 case OpCom32:
625 v.Op = OpAMD64NOTL
626 return true
627 case OpCom64:
628 v.Op = OpAMD64NOTQ
629 return true
630 case OpCom8:
631 v.Op = OpAMD64NOTL
632 return true
633 case OpCondSelect:
634 return rewriteValueAMD64_OpCondSelect(v)
635 case OpConst16:
636 return rewriteValueAMD64_OpConst16(v)
637 case OpConst32:
638 v.Op = OpAMD64MOVLconst
639 return true
640 case OpConst32F:
641 v.Op = OpAMD64MOVSSconst
642 return true
643 case OpConst64:
644 v.Op = OpAMD64MOVQconst
645 return true
646 case OpConst64F:
647 v.Op = OpAMD64MOVSDconst
648 return true
649 case OpConst8:
650 return rewriteValueAMD64_OpConst8(v)
651 case OpConstBool:
652 return rewriteValueAMD64_OpConstBool(v)
653 case OpConstNil:
654 return rewriteValueAMD64_OpConstNil(v)
655 case OpCtz16:
656 return rewriteValueAMD64_OpCtz16(v)
657 case OpCtz16NonZero:
658 return rewriteValueAMD64_OpCtz16NonZero(v)
659 case OpCtz32:
660 return rewriteValueAMD64_OpCtz32(v)
661 case OpCtz32NonZero:
662 return rewriteValueAMD64_OpCtz32NonZero(v)
663 case OpCtz64:
664 return rewriteValueAMD64_OpCtz64(v)
665 case OpCtz64NonZero:
666 return rewriteValueAMD64_OpCtz64NonZero(v)
667 case OpCtz8:
668 return rewriteValueAMD64_OpCtz8(v)
669 case OpCtz8NonZero:
670 return rewriteValueAMD64_OpCtz8NonZero(v)
671 case OpCvt32Fto32:
672 v.Op = OpAMD64CVTTSS2SL
673 return true
674 case OpCvt32Fto64:
675 v.Op = OpAMD64CVTTSS2SQ
676 return true
677 case OpCvt32Fto64F:
678 v.Op = OpAMD64CVTSS2SD
679 return true
680 case OpCvt32to32F:
681 v.Op = OpAMD64CVTSL2SS
682 return true
683 case OpCvt32to64F:
684 v.Op = OpAMD64CVTSL2SD
685 return true
686 case OpCvt64Fto32:
687 v.Op = OpAMD64CVTTSD2SL
688 return true
689 case OpCvt64Fto32F:
690 v.Op = OpAMD64CVTSD2SS
691 return true
692 case OpCvt64Fto64:
693 v.Op = OpAMD64CVTTSD2SQ
694 return true
695 case OpCvt64to32F:
696 v.Op = OpAMD64CVTSQ2SS
697 return true
698 case OpCvt64to64F:
699 v.Op = OpAMD64CVTSQ2SD
700 return true
701 case OpCvtBoolToUint8:
702 v.Op = OpCopy
703 return true
704 case OpDiv128u:
705 v.Op = OpAMD64DIVQU2
706 return true
707 case OpDiv16:
708 return rewriteValueAMD64_OpDiv16(v)
709 case OpDiv16u:
710 return rewriteValueAMD64_OpDiv16u(v)
711 case OpDiv32:
712 return rewriteValueAMD64_OpDiv32(v)
713 case OpDiv32F:
714 v.Op = OpAMD64DIVSS
715 return true
716 case OpDiv32u:
717 return rewriteValueAMD64_OpDiv32u(v)
718 case OpDiv64:
719 return rewriteValueAMD64_OpDiv64(v)
720 case OpDiv64F:
721 v.Op = OpAMD64DIVSD
722 return true
723 case OpDiv64u:
724 return rewriteValueAMD64_OpDiv64u(v)
725 case OpDiv8:
726 return rewriteValueAMD64_OpDiv8(v)
727 case OpDiv8u:
728 return rewriteValueAMD64_OpDiv8u(v)
729 case OpEq16:
730 return rewriteValueAMD64_OpEq16(v)
731 case OpEq32:
732 return rewriteValueAMD64_OpEq32(v)
733 case OpEq32F:
734 return rewriteValueAMD64_OpEq32F(v)
735 case OpEq64:
736 return rewriteValueAMD64_OpEq64(v)
737 case OpEq64F:
738 return rewriteValueAMD64_OpEq64F(v)
739 case OpEq8:
740 return rewriteValueAMD64_OpEq8(v)
741 case OpEqB:
742 return rewriteValueAMD64_OpEqB(v)
743 case OpEqPtr:
744 return rewriteValueAMD64_OpEqPtr(v)
745 case OpFMA:
746 return rewriteValueAMD64_OpFMA(v)
747 case OpFloor:
748 return rewriteValueAMD64_OpFloor(v)
749 case OpGetCallerPC:
750 v.Op = OpAMD64LoweredGetCallerPC
751 return true
752 case OpGetCallerSP:
753 v.Op = OpAMD64LoweredGetCallerSP
754 return true
755 case OpGetClosurePtr:
756 v.Op = OpAMD64LoweredGetClosurePtr
757 return true
758 case OpGetG:
759 return rewriteValueAMD64_OpGetG(v)
760 case OpHasCPUFeature:
761 return rewriteValueAMD64_OpHasCPUFeature(v)
762 case OpHmul32:
763 v.Op = OpAMD64HMULL
764 return true
765 case OpHmul32u:
766 v.Op = OpAMD64HMULLU
767 return true
768 case OpHmul64:
769 v.Op = OpAMD64HMULQ
770 return true
771 case OpHmul64u:
772 v.Op = OpAMD64HMULQU
773 return true
774 case OpInterCall:
775 v.Op = OpAMD64CALLinter
776 return true
777 case OpIsInBounds:
778 return rewriteValueAMD64_OpIsInBounds(v)
779 case OpIsNonNil:
780 return rewriteValueAMD64_OpIsNonNil(v)
781 case OpIsSliceInBounds:
782 return rewriteValueAMD64_OpIsSliceInBounds(v)
783 case OpLeq16:
784 return rewriteValueAMD64_OpLeq16(v)
785 case OpLeq16U:
786 return rewriteValueAMD64_OpLeq16U(v)
787 case OpLeq32:
788 return rewriteValueAMD64_OpLeq32(v)
789 case OpLeq32F:
790 return rewriteValueAMD64_OpLeq32F(v)
791 case OpLeq32U:
792 return rewriteValueAMD64_OpLeq32U(v)
793 case OpLeq64:
794 return rewriteValueAMD64_OpLeq64(v)
795 case OpLeq64F:
796 return rewriteValueAMD64_OpLeq64F(v)
797 case OpLeq64U:
798 return rewriteValueAMD64_OpLeq64U(v)
799 case OpLeq8:
800 return rewriteValueAMD64_OpLeq8(v)
801 case OpLeq8U:
802 return rewriteValueAMD64_OpLeq8U(v)
803 case OpLess16:
804 return rewriteValueAMD64_OpLess16(v)
805 case OpLess16U:
806 return rewriteValueAMD64_OpLess16U(v)
807 case OpLess32:
808 return rewriteValueAMD64_OpLess32(v)
809 case OpLess32F:
810 return rewriteValueAMD64_OpLess32F(v)
811 case OpLess32U:
812 return rewriteValueAMD64_OpLess32U(v)
813 case OpLess64:
814 return rewriteValueAMD64_OpLess64(v)
815 case OpLess64F:
816 return rewriteValueAMD64_OpLess64F(v)
817 case OpLess64U:
818 return rewriteValueAMD64_OpLess64U(v)
819 case OpLess8:
820 return rewriteValueAMD64_OpLess8(v)
821 case OpLess8U:
822 return rewriteValueAMD64_OpLess8U(v)
823 case OpLoad:
824 return rewriteValueAMD64_OpLoad(v)
825 case OpLocalAddr:
826 return rewriteValueAMD64_OpLocalAddr(v)
827 case OpLsh16x16:
828 return rewriteValueAMD64_OpLsh16x16(v)
829 case OpLsh16x32:
830 return rewriteValueAMD64_OpLsh16x32(v)
831 case OpLsh16x64:
832 return rewriteValueAMD64_OpLsh16x64(v)
833 case OpLsh16x8:
834 return rewriteValueAMD64_OpLsh16x8(v)
835 case OpLsh32x16:
836 return rewriteValueAMD64_OpLsh32x16(v)
837 case OpLsh32x32:
838 return rewriteValueAMD64_OpLsh32x32(v)
839 case OpLsh32x64:
840 return rewriteValueAMD64_OpLsh32x64(v)
841 case OpLsh32x8:
842 return rewriteValueAMD64_OpLsh32x8(v)
843 case OpLsh64x16:
844 return rewriteValueAMD64_OpLsh64x16(v)
845 case OpLsh64x32:
846 return rewriteValueAMD64_OpLsh64x32(v)
847 case OpLsh64x64:
848 return rewriteValueAMD64_OpLsh64x64(v)
849 case OpLsh64x8:
850 return rewriteValueAMD64_OpLsh64x8(v)
851 case OpLsh8x16:
852 return rewriteValueAMD64_OpLsh8x16(v)
853 case OpLsh8x32:
854 return rewriteValueAMD64_OpLsh8x32(v)
855 case OpLsh8x64:
856 return rewriteValueAMD64_OpLsh8x64(v)
857 case OpLsh8x8:
858 return rewriteValueAMD64_OpLsh8x8(v)
859 case OpMod16:
860 return rewriteValueAMD64_OpMod16(v)
861 case OpMod16u:
862 return rewriteValueAMD64_OpMod16u(v)
863 case OpMod32:
864 return rewriteValueAMD64_OpMod32(v)
865 case OpMod32u:
866 return rewriteValueAMD64_OpMod32u(v)
867 case OpMod64:
868 return rewriteValueAMD64_OpMod64(v)
869 case OpMod64u:
870 return rewriteValueAMD64_OpMod64u(v)
871 case OpMod8:
872 return rewriteValueAMD64_OpMod8(v)
873 case OpMod8u:
874 return rewriteValueAMD64_OpMod8u(v)
875 case OpMove:
876 return rewriteValueAMD64_OpMove(v)
877 case OpMul16:
878 v.Op = OpAMD64MULL
879 return true
880 case OpMul32:
881 v.Op = OpAMD64MULL
882 return true
883 case OpMul32F:
884 v.Op = OpAMD64MULSS
885 return true
886 case OpMul64:
887 v.Op = OpAMD64MULQ
888 return true
889 case OpMul64F:
890 v.Op = OpAMD64MULSD
891 return true
892 case OpMul64uhilo:
893 v.Op = OpAMD64MULQU2
894 return true
895 case OpMul8:
896 v.Op = OpAMD64MULL
897 return true
898 case OpNeg16:
899 v.Op = OpAMD64NEGL
900 return true
901 case OpNeg32:
902 v.Op = OpAMD64NEGL
903 return true
904 case OpNeg32F:
905 return rewriteValueAMD64_OpNeg32F(v)
906 case OpNeg64:
907 v.Op = OpAMD64NEGQ
908 return true
909 case OpNeg64F:
910 return rewriteValueAMD64_OpNeg64F(v)
911 case OpNeg8:
912 v.Op = OpAMD64NEGL
913 return true
914 case OpNeq16:
915 return rewriteValueAMD64_OpNeq16(v)
916 case OpNeq32:
917 return rewriteValueAMD64_OpNeq32(v)
918 case OpNeq32F:
919 return rewriteValueAMD64_OpNeq32F(v)
920 case OpNeq64:
921 return rewriteValueAMD64_OpNeq64(v)
922 case OpNeq64F:
923 return rewriteValueAMD64_OpNeq64F(v)
924 case OpNeq8:
925 return rewriteValueAMD64_OpNeq8(v)
926 case OpNeqB:
927 return rewriteValueAMD64_OpNeqB(v)
928 case OpNeqPtr:
929 return rewriteValueAMD64_OpNeqPtr(v)
930 case OpNilCheck:
931 v.Op = OpAMD64LoweredNilCheck
932 return true
933 case OpNot:
934 return rewriteValueAMD64_OpNot(v)
935 case OpOffPtr:
936 return rewriteValueAMD64_OpOffPtr(v)
937 case OpOr16:
938 v.Op = OpAMD64ORL
939 return true
940 case OpOr32:
941 v.Op = OpAMD64ORL
942 return true
943 case OpOr64:
944 v.Op = OpAMD64ORQ
945 return true
946 case OpOr8:
947 v.Op = OpAMD64ORL
948 return true
949 case OpOrB:
950 v.Op = OpAMD64ORL
951 return true
952 case OpPanicBounds:
953 return rewriteValueAMD64_OpPanicBounds(v)
954 case OpPopCount16:
955 return rewriteValueAMD64_OpPopCount16(v)
956 case OpPopCount32:
957 v.Op = OpAMD64POPCNTL
958 return true
959 case OpPopCount64:
960 v.Op = OpAMD64POPCNTQ
961 return true
962 case OpPopCount8:
963 return rewriteValueAMD64_OpPopCount8(v)
964 case OpPrefetchCache:
965 v.Op = OpAMD64PrefetchT0
966 return true
967 case OpPrefetchCacheStreamed:
968 v.Op = OpAMD64PrefetchNTA
969 return true
970 case OpRotateLeft16:
971 v.Op = OpAMD64ROLW
972 return true
973 case OpRotateLeft32:
974 v.Op = OpAMD64ROLL
975 return true
976 case OpRotateLeft64:
977 v.Op = OpAMD64ROLQ
978 return true
979 case OpRotateLeft8:
980 v.Op = OpAMD64ROLB
981 return true
982 case OpRound32F:
983 v.Op = OpCopy
984 return true
985 case OpRound64F:
986 v.Op = OpCopy
987 return true
988 case OpRoundToEven:
989 return rewriteValueAMD64_OpRoundToEven(v)
990 case OpRsh16Ux16:
991 return rewriteValueAMD64_OpRsh16Ux16(v)
992 case OpRsh16Ux32:
993 return rewriteValueAMD64_OpRsh16Ux32(v)
994 case OpRsh16Ux64:
995 return rewriteValueAMD64_OpRsh16Ux64(v)
996 case OpRsh16Ux8:
997 return rewriteValueAMD64_OpRsh16Ux8(v)
998 case OpRsh16x16:
999 return rewriteValueAMD64_OpRsh16x16(v)
1000 case OpRsh16x32:
1001 return rewriteValueAMD64_OpRsh16x32(v)
1002 case OpRsh16x64:
1003 return rewriteValueAMD64_OpRsh16x64(v)
1004 case OpRsh16x8:
1005 return rewriteValueAMD64_OpRsh16x8(v)
1006 case OpRsh32Ux16:
1007 return rewriteValueAMD64_OpRsh32Ux16(v)
1008 case OpRsh32Ux32:
1009 return rewriteValueAMD64_OpRsh32Ux32(v)
1010 case OpRsh32Ux64:
1011 return rewriteValueAMD64_OpRsh32Ux64(v)
1012 case OpRsh32Ux8:
1013 return rewriteValueAMD64_OpRsh32Ux8(v)
1014 case OpRsh32x16:
1015 return rewriteValueAMD64_OpRsh32x16(v)
1016 case OpRsh32x32:
1017 return rewriteValueAMD64_OpRsh32x32(v)
1018 case OpRsh32x64:
1019 return rewriteValueAMD64_OpRsh32x64(v)
1020 case OpRsh32x8:
1021 return rewriteValueAMD64_OpRsh32x8(v)
1022 case OpRsh64Ux16:
1023 return rewriteValueAMD64_OpRsh64Ux16(v)
1024 case OpRsh64Ux32:
1025 return rewriteValueAMD64_OpRsh64Ux32(v)
1026 case OpRsh64Ux64:
1027 return rewriteValueAMD64_OpRsh64Ux64(v)
1028 case OpRsh64Ux8:
1029 return rewriteValueAMD64_OpRsh64Ux8(v)
1030 case OpRsh64x16:
1031 return rewriteValueAMD64_OpRsh64x16(v)
1032 case OpRsh64x32:
1033 return rewriteValueAMD64_OpRsh64x32(v)
1034 case OpRsh64x64:
1035 return rewriteValueAMD64_OpRsh64x64(v)
1036 case OpRsh64x8:
1037 return rewriteValueAMD64_OpRsh64x8(v)
1038 case OpRsh8Ux16:
1039 return rewriteValueAMD64_OpRsh8Ux16(v)
1040 case OpRsh8Ux32:
1041 return rewriteValueAMD64_OpRsh8Ux32(v)
1042 case OpRsh8Ux64:
1043 return rewriteValueAMD64_OpRsh8Ux64(v)
1044 case OpRsh8Ux8:
1045 return rewriteValueAMD64_OpRsh8Ux8(v)
1046 case OpRsh8x16:
1047 return rewriteValueAMD64_OpRsh8x16(v)
1048 case OpRsh8x32:
1049 return rewriteValueAMD64_OpRsh8x32(v)
1050 case OpRsh8x64:
1051 return rewriteValueAMD64_OpRsh8x64(v)
1052 case OpRsh8x8:
1053 return rewriteValueAMD64_OpRsh8x8(v)
1054 case OpSelect0:
1055 return rewriteValueAMD64_OpSelect0(v)
1056 case OpSelect1:
1057 return rewriteValueAMD64_OpSelect1(v)
1058 case OpSelectN:
1059 return rewriteValueAMD64_OpSelectN(v)
1060 case OpSignExt16to32:
1061 v.Op = OpAMD64MOVWQSX
1062 return true
1063 case OpSignExt16to64:
1064 v.Op = OpAMD64MOVWQSX
1065 return true
1066 case OpSignExt32to64:
1067 v.Op = OpAMD64MOVLQSX
1068 return true
1069 case OpSignExt8to16:
1070 v.Op = OpAMD64MOVBQSX
1071 return true
1072 case OpSignExt8to32:
1073 v.Op = OpAMD64MOVBQSX
1074 return true
1075 case OpSignExt8to64:
1076 v.Op = OpAMD64MOVBQSX
1077 return true
1078 case OpSlicemask:
1079 return rewriteValueAMD64_OpSlicemask(v)
1080 case OpSpectreIndex:
1081 return rewriteValueAMD64_OpSpectreIndex(v)
1082 case OpSpectreSliceIndex:
1083 return rewriteValueAMD64_OpSpectreSliceIndex(v)
1084 case OpSqrt:
1085 v.Op = OpAMD64SQRTSD
1086 return true
1087 case OpSqrt32:
1088 v.Op = OpAMD64SQRTSS
1089 return true
1090 case OpStaticCall:
1091 v.Op = OpAMD64CALLstatic
1092 return true
1093 case OpStore:
1094 return rewriteValueAMD64_OpStore(v)
1095 case OpSub16:
1096 v.Op = OpAMD64SUBL
1097 return true
1098 case OpSub32:
1099 v.Op = OpAMD64SUBL
1100 return true
1101 case OpSub32F:
1102 v.Op = OpAMD64SUBSS
1103 return true
1104 case OpSub64:
1105 v.Op = OpAMD64SUBQ
1106 return true
1107 case OpSub64F:
1108 v.Op = OpAMD64SUBSD
1109 return true
1110 case OpSub8:
1111 v.Op = OpAMD64SUBL
1112 return true
1113 case OpSubPtr:
1114 v.Op = OpAMD64SUBQ
1115 return true
1116 case OpTailCall:
1117 v.Op = OpAMD64CALLtail
1118 return true
1119 case OpTrunc:
1120 return rewriteValueAMD64_OpTrunc(v)
1121 case OpTrunc16to8:
1122 v.Op = OpCopy
1123 return true
1124 case OpTrunc32to16:
1125 v.Op = OpCopy
1126 return true
1127 case OpTrunc32to8:
1128 v.Op = OpCopy
1129 return true
1130 case OpTrunc64to16:
1131 v.Op = OpCopy
1132 return true
1133 case OpTrunc64to32:
1134 v.Op = OpCopy
1135 return true
1136 case OpTrunc64to8:
1137 v.Op = OpCopy
1138 return true
1139 case OpWB:
1140 v.Op = OpAMD64LoweredWB
1141 return true
1142 case OpXor16:
1143 v.Op = OpAMD64XORL
1144 return true
1145 case OpXor32:
1146 v.Op = OpAMD64XORL
1147 return true
1148 case OpXor64:
1149 v.Op = OpAMD64XORQ
1150 return true
1151 case OpXor8:
1152 v.Op = OpAMD64XORL
1153 return true
1154 case OpZero:
1155 return rewriteValueAMD64_OpZero(v)
1156 case OpZeroExt16to32:
1157 v.Op = OpAMD64MOVWQZX
1158 return true
1159 case OpZeroExt16to64:
1160 v.Op = OpAMD64MOVWQZX
1161 return true
1162 case OpZeroExt32to64:
1163 v.Op = OpAMD64MOVLQZX
1164 return true
1165 case OpZeroExt8to16:
1166 v.Op = OpAMD64MOVBQZX
1167 return true
1168 case OpZeroExt8to32:
1169 v.Op = OpAMD64MOVBQZX
1170 return true
1171 case OpZeroExt8to64:
1172 v.Op = OpAMD64MOVBQZX
1173 return true
1174 }
1175 return false
1176 }
// rewriteValueAMD64_OpAMD64ADCQ applies rewrite rules to an ADCQ value
// (64-bit add-with-carry: arg0 + arg1 + carry-in flags).
// It reports whether a rewrite was applied.
// NOTE(review): machine-generated from _gen/AMD64.rules; rule order is
// significant, so only comments are added here — code is unchanged.
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		// ADCQ is commutative in its first two args; try both orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	// A known-clear carry-in degrades ADC to a plain carry-producing add.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADCQconst applies rewrite rules to an
// ADCQconst value (x + const + carry-in flags). Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	// Carry-in known zero: drop the carry input, keep carry-out.
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDL applies rewrite rules to a 32-bit ADDL
// value: constant folding into ADDLconst, shift-pair recognition into
// rotates, LEA formation for scaled adds, NEG fusion into SUBL, and
// load folding. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		// ADDL is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x))
	// cond: d == 32-c
	// result: (ROLLconst [c] x)
	// (x<<c) + (x>>(32-c)) is a 32-bit rotate.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst [c] x) (SHRWconst [d] x))
	// cond: d == 16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst [c] x)
	// 16-bit rotate, valid only when the result type is 2 bytes wide.
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL <t> (SHLLconst [c] x) (SHRBconst [d] x))
	// cond: d == 8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst [c] x)
	// 8-bit rotate, valid only when the result type is 1 byte wide.
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [1] y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			// Inner ADDL is also commutative.
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	// SB (static base) may not appear as an LEA index register.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	// Fold a load into the add's memory operand.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLconst applies rewrite rules to an
// ADDLconst value: LEA formation, offset merging into LEAL variants,
// and constant folding. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (SHLLconst [1] x))
	// result: (LEAL1 [c] x x)
	// x<<1 == x+x, so encode as LEA with base and index both x.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c == 0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	// 32-bit wraparound is the defined semantics, so no overflow guard.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	// Stack-pointer arithmetic is better expressed as an address computation.
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLconstmodify applies rewrite rules to an
// ADDLconstmodify value (read-modify-write add of a constant to memory),
// folding address arithmetic into the instruction's offset/symbol.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLload applies rewrite rules to an ADDLload
// value (add with a memory operand): address folding, and forwarding a
// just-stored float value through a register move instead of memory.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	// Loading back a value just stored from an XMM register: move it
	// through a register-to-register f2i move instead of memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDLmodify applies rewrite rules to an
// ADDLmodify value (read-modify-write add to memory), folding address
// arithmetic into the offset/symbol. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQ applies rewrite rules to a 64-bit ADDQ
// value: constant folding into ADDQconst, rotate recognition, LEA
// formation for scaled adds, NEG fusion into SUBQ, and load folding.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconst [int32(c)] x)
	// Immediates in ADDQ are sign-extended 32-bit, hence the guard.
	for {
		// ADDQ is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ (SHLQconst [c] x) (SHRQconst [d] x))
	// cond: d == 64-c
	// result: (ROLQconst [c] x)
	// (x<<c) + (x>>(64-c)) is a 64-bit rotate.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 64-c) {
				continue
			}
			v.reset(OpAMD64ROLQconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [1] y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			// Inner ADDQ is also commutative.
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	// SB (static base) may not appear as an LEA index register.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	// Fold a load into the add's memory operand.
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQcarry applies rewrite rules to an
// ADDQcarry value (64-bit add producing a carry-out), folding a 32-bit
// constant operand into the immediate form. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry [int32(c)] x)
	for {
		// ADDQcarry is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQconst applies rewrite rules to an
// ADDQconst value: LEA formation, offset merging into LEAQ variants,
// and constant folding. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDQconst [c] (ADDQ x y))
	// result: (LEAQ1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (SHLQconst [1] x))
	// result: (LEAQ1 [c] x x)
	// x<<1 == x+x, so encode as LEA with base and index both x.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ADDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) + d)
		return true
	}
	// match: (ADDQconst [c] (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (ADDQconst [c+d] x)
	// Unlike ADDLconst, the merged sum must still fit the 32-bit immediate.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDQconst [off] x:(SP))
	// result: (LEAQ [off] x)
	// Stack-pointer arithmetic is better expressed as an address computation.
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQconstmodify applies rewrite rules to an
// ADDQconstmodify value (read-modify-write add of a constant to memory),
// folding address arithmetic into the instruction's offset/symbol.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQload applies rewrite rules to an ADDQload
// value (add with a memory operand): address folding, and forwarding a
// just-stored float value through a register move instead of memory.
// Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ADDQ x (MOVQf2i y))
	// Loading back a value just stored from an XMM register: move it
	// through a register-to-register f2i move instead of memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDQmodify applies rewrite rules to an
// ADDQmodify value (read-modify-write add to memory), folding address
// arithmetic into the offset/symbol. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSD applies rewrite rules to an ADDSD value
// (scalar float64 add), folding a single-use load into the add's memory
// operand. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSDload x [off] {sym} ptr mem)
	for {
		// ADDSD is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSDload applies rewrite rules to an
// ADDSDload value (float64 add with a memory operand): address folding,
// and forwarding a just-stored integer value through a register move
// instead of memory. Reports whether v was rewritten.
func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (ADDSD x (MOVQi2f y))
	// Loading back a value just stored from a GP register: move it
	// through a register-to-register i2f move instead of memory.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSS applies the generated rewrite rules for ADDSS
// (float32 add). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDSSload x [off] {sym} ptr mem)
	for {
		// ADDSS is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ADDSSload applies the generated rewrite rules for
// ADDSSload (float32 add with one operand loaded from memory).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (ADDSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// The memory operand was just stored from an integer register:
		// move the bits to an FP register instead of going through memory.
		v.reset(OpAMD64ADDSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDL applies the generated rewrite rules for ANDL
// (32-bit bitwise AND). It returns true if v was rewritten.
// All rules below treat ANDL as commutative and try both argument orders.
func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
	// result: (BTRL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTL {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRLconst [int8(log32(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTRLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x (MOVLconst [c]))
	// result: (ANDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ANDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDL x (NOTL y))
	// cond: buildcfg.GOAMD64 >= 3 (ANDN requires BMI1)
	// result: (ANDNL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTL {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDL x (NEGL x))
	// cond: buildcfg.GOAMD64 >= 3 (BLSI requires BMI1)
	// result: (BLSIL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIL)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDL x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3 (BLSR requires BMI1)
	// result: (BLSRL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSRL)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconst applies the generated rewrite rules for
// ANDLconst (32-bit AND with an immediate). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDLconst [c] x)
	// cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRLconst [int8(log32(^c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [c &^ (1<<uint32(d))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDLconst [c] _)
	// cond: c == 0
	// result: (MOVLconst [0])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == 0) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (ANDLconst [c] x)
	// cond: c == -1
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == -1) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c & d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c & d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLconstmodify applies the generated rewrite rules
// for ANDLconstmodify (read-modify-write AND of a constant into memory),
// folding address arithmetic into the addressing mode.
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLload applies the generated rewrite rules for
// ANDLload (32-bit AND with one operand loaded from memory).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ANDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// The memory operand was just stored from an FP register:
		// move the bits to an integer register instead of going through memory.
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDLmodify applies the generated rewrite rules for
// ANDLmodify (read-modify-write 32-bit AND into memory), folding address
// arithmetic into the addressing mode. It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDNL applies the generated rewrite rules for ANDNL
// (32-bit x &^ y). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNL x (SHLL (MOVLconst [1]) y))
	// result: (BTRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDNQ applies the generated rewrite rules for ANDNQ
// (64-bit x &^ y). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
	// result: (BTRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64SHLQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64BTRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQ applies the generated rewrite rules for ANDQ
// (64-bit bitwise AND). It returns true if v was rewritten.
// All rules below treat ANDQ as commutative and try both argument orders.
func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
	// result: (BTRQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64NOTQ {
				continue
			}
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTRQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
	// result: (BTRQconst [int8(log64(^c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTRQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(^c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (MOVQconst [c]))
	// cond: is32Bit(c) (the immediate form only takes a 32-bit constant)
	// result: (ANDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ANDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ANDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ANDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (ANDQ x (NOTQ y))
	// cond: buildcfg.GOAMD64 >= 3 (ANDN requires BMI1)
	// result: (ANDNQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NOTQ {
				continue
			}
			y := v_1.Args[0]
			if !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64ANDNQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ANDQ x (NEGQ x))
	// cond: buildcfg.GOAMD64 >= 3 (BLSI requires BMI1)
	// result: (BLSIQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSIQ)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ANDQ x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3 (BLSR requires BMI1)
	// result: (BLSRQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSRQ)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconst applies the generated rewrite rules for
// ANDQconst (64-bit AND with a 32-bit immediate, sign-extended).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ANDQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
	// result: (BTRQconst [int8(log32(^c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
			break
		}
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(^c)))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (ANDQconst [d] x))
	// result: (ANDQconst [c & d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c & d)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [c] (BTRQconst [d] x))
	// cond: is32Bit(int64(c) &^ (1<<uint32(d)))
	// result: (ANDQconst [c &^ (1<<uint32(d))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFF] x)
	// result: (MOVBQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0xFFFF] x)
	// result: (MOVWQZX x)
	for {
		if auxIntToInt32(v.AuxInt) != 0xFFFF {
			break
		}
		x := v_0
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (ANDQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (ANDQconst [-1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ANDQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c) & d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) & d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQconstmodify applies the generated rewrite rules
// for ANDQconstmodify (read-modify-write AND of a constant into memory),
// folding address arithmetic into the addressing mode.
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQload applies the generated rewrite rules for
// ANDQload (64-bit AND with one operand loaded from memory).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ANDQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// The memory operand was just stored from an FP register:
		// move the bits to an integer register instead of going through memory.
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ANDQmodify applies the generated rewrite rules for
// ANDQmodify (read-modify-write 64-bit AND into memory), folding address
// arithmetic into the addressing mode. It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ANDQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSFQ applies the generated rewrite rules for BSFQ
// (bit scan forward). The rules drop a zero-extension that is made redundant
// by the OR of a sentinel bit above the extended width.
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<8] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<8 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
	// result: (BSFQ (ORQconst <t> [1<<16] x))
	for {
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		t := v_0.Type
		if auxIntToInt32(v_0.AuxInt) != 1<<16 {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0_0.Args[0]
		v.reset(OpAMD64BSFQ)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSWAPL applies the generated rewrite rules for
// BSWAPL (32-bit byte swap). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BSWAPL (BSWAPL p))
	// result: p (byte swap is its own inverse)
	for {
		if v_0.Op != OpAMD64BSWAPL {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPL x:(MOVLload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 (MOVBE requires GOAMD64=v3)
	// result: (MOVBELload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBELload)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPL (MOVBELload [i] {s} p m))
	// result: (MOVLload [i] {s} p m)
	for {
		if v_0.Op != OpAMD64MOVBELload {
			break
		}
		i := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		m := v_0.Args[1]
		p := v_0.Args[0]
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg2(p, m)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BSWAPQ applies the generated rewrite rules for
// BSWAPQ (64-bit byte swap). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BSWAPQ (BSWAPQ p))
	// result: p (byte swap is its own inverse)
	for {
		if v_0.Op != OpAMD64BSWAPQ {
			break
		}
		p := v_0.Args[0]
		v.copyOf(p)
		return true
	}
	// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3 (MOVBE requires GOAMD64=v3)
	// result: (MOVBEQload [i] {s} p mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		i := auxIntToInt32(x.AuxInt)
		s := auxToSym(x.Aux)
		mem := x.Args[1]
		p := x.Args[0]
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBEQload)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (BSWAPQ (MOVBEQload [i] {s} p m))
	// result: (MOVQload [i] {s} p m)
	for {
		if v_0.Op != OpAMD64MOVBEQload {
			break
		}
		i := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		m := v_0.Args[1]
		p := v_0.Args[0]
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg2(p, m)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCLconst applies the generated rewrite rules for
// BTCLconst (32-bit bit-test-and-complement with constant bit index).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCLconst [c] (XORLconst [d] x))
	// result: (XORLconst [d ^ 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTCLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d ^ (1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTCQconst applies the generated rewrite rules for
// BTCQconst (64-bit bit-test-and-complement with constant bit index).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTCQconst [c] (XORQconst [d] x))
	// cond: is32Bit(int64(d) ^ 1<<uint32(c)) (XORQconst only takes a 32-bit immediate)
	// result: (XORQconst [d ^ 1<<uint32(c)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (BTCQconst [d] x))
	// cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
	// result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTCQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d ^ (1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTLconst applies the generated rewrite rules for
// BTLconst (32-bit bit test with constant bit index), folding shifts of the
// tested value into the bit index. It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTLconst [c] (SHRQconst [d] x))
	// cond: (c+d) < 64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLQconst [d] x))
	// cond: c > d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRQ x y))
	// result: (BTQ y x) (testing bit 0 of x>>y is testing bit y of x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (BTLconst [c] (SHRLconst [d] x))
	// cond: (c+d) < 32
	// result: (BTLconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 32) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [c] (SHLLconst [d] x))
	// cond: c > d
	// result: (BTLconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTLconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTLconst [0] s:(SHRL x y))
	// result: (BTL y x) (testing bit 0 of x>>y is testing bit y of x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTL)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTQconst applies the generated rewrite rules for
// BTQconst (64-bit bit test with constant bit index), folding shifts of the
// tested value into the bit index. It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTQconst [c] (SHRQconst [d] x))
	// cond: (c+d) < 64
	// result: (BTQconst [c+d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !((c + d) < 64) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [c] (SHLQconst [d] x))
	// cond: c > d
	// result: (BTQconst [c-d] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64SHLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c > d) {
			break
		}
		v.reset(OpAMD64BTQconst)
		v.AuxInt = int8ToAuxInt(c - d)
		v.AddArg(x)
		return true
	}
	// match: (BTQconst [0] s:(SHRQ x y))
	// result: (BTQ y x) (testing bit 0 of x>>y is testing bit y of x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		s := v_0
		if s.Op != OpAMD64SHRQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		v.reset(OpAMD64BTQ)
		v.AddArg2(y, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRLconst applies the generated rewrite rules for
// BTRLconst (32-bit bit-reset with constant bit index).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRLconst [c] (BTSLconst [c] x))
	// result: (BTRLconst [c] x) (set then reset of the same bit is just reset)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTCLconst [c] x))
	// result: (BTRLconst [c] x) (complement then reset of the same bit is just reset)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (ANDLconst [d] x))
	// result: (ANDLconst [d &^ (1<<uint32(c))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (BTRLconst [d] x))
	// result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (BTRLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d &^ (1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64BTRQconst applies the generated rewrite rules for
// BTRQconst (64-bit bit-reset with constant bit index).
// It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// result: (BTRQconst [c] x) (set then reset of the same bit is just reset)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// result: (BTRQconst [c] x) (complement then reset of the same bit is just reset)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (ANDQconst [d] x))
	// cond: is32Bit(int64(d) &^ (1<<uint32(c))) (ANDQconst only takes a 32-bit immediate)
	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (BTRQconst [d] x))
	// cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
		v.AddArg(x)
		return true
	}
	// match: (BTRQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d &^ (1<<uint32(c))])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSLconst [c] (BTRLconst [c] x))
	// result: (BTSLconst [c] x)
	// Clearing then setting the same bit is just setting it.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTCLconst [c] x))
	// result: (BTSLconst [c] x)
	// Toggling then setting the same bit is just setting it.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (ORLconst [d] x))
	// result: (ORLconst [d | 1<<uint32(c)] x)
	// Fold the bit-set into an existing OR mask.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (BTSLconst [d] x))
	// result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
	// Merge two bit-sets into a single OR of both bits.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTSLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [d|(1<<uint32(c))])
	// Constant-fold setting a bit of a constant.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// result: (BTSQconst [c] x)
	// Clearing then setting the same bit is just setting it.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// result: (BTSQconst [c] x)
	// Toggling then setting the same bit is just setting it.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (ORQconst [d] x))
	// cond: is32Bit(int64(d) | 1<<uint32(c))
	// result: (ORQconst [d | 1<<uint32(c)] x)
	// Fold the bit-set into an existing OR mask, if it still fits in 32 bits.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(d) | 1<<uint32(c))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (BTSQconst [d] x))
	// cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
	// result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
	// Merge two bit-sets into a single OR of both bits.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (BTSQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d|(1<<uint32(c))])
	// Constant-fold setting a bit of a constant.
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCC x y (InvertFlags cond))
	// result: (CMOVLLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLCS x y (InvertFlags cond))
	// result: (CMOVLHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLEQ x y (InvertFlags cond))
	// result: (CMOVLEQ x y cond)
	// Equality is unaffected by swapping the comparison's operands.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGE x y (InvertFlags cond))
	// result: (CMOVLLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLGT x y (InvertFlags cond))
	// result: (CMOVLLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLHI x y (InvertFlags cond))
	// result: (CMOVLCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLE x y (InvertFlags cond))
	// result: (CMOVLGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLS x y (InvertFlags cond))
	// result: (CMOVLCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLLT x y (InvertFlags cond))
	// result: (CMOVLGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVLNE x y (InvertFlags cond))
	// result: (CMOVLNE x y cond)
	// Inequality is unaffected by swapping the comparison's operands.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVLNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVLNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCC x y (InvertFlags cond))
	// result: (CMOVQLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQCS x y (InvertFlags cond))
	// result: (CMOVQHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQEQ x y (InvertFlags cond))
	// result: (CMOVQEQ x y cond)
	// Equality is unaffected by swapping the comparison's operands.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
	// A nonzero OR'd-in constant guarantees the BSFQ input is nonzero,
	// so its ZF result is known to be clear and the CMOV never moves.
	for {
		x := v_0
		if v_2.Op != OpSelect1 {
			break
		}
		v_2_0 := v_2.Args[0]
		if v_2_0.Op != OpAMD64BSFQ {
			break
		}
		v_2_0_0 := v_2_0.Args[0]
		if v_2_0_0.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(v_2_0_0.AuxInt)
		if !(c != 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGE x y (InvertFlags cond))
	// result: (CMOVQLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQNE x y (InvertFlags cond))
	// result: (CMOVQNE x y cond)
	// Inequality is unaffected by swapping the comparison's operands.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	// Equality is unaffected by swapping the comparison's operands.
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWHI x y (InvertFlags cond))
	// result: (CMOVWCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLE x y (InvertFlags cond))
	// result: (CMOVWGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLS x y (InvertFlags cond))
	// result: (CMOVWCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWLT rewrites CMOVWLT (16-bit conditional move
// on signed less-than). One loop per rule, tried in order; each either
// rewrites v and returns true, or breaks to try the next rule.
func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLT x y (InvertFlags cond))
	// result: (CMOVWGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMOVWNE rewrites CMOVWNE (16-bit conditional move
// on not-equal; NE is symmetric under operand swap, so InvertFlags is simply
// dropped). One loop per rule, tried in order; each either rewrites v and
// returns true, or breaks to try the next rule.
func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWNE x y (InvertFlags cond))
	// result: (CMOVWNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPB rewrites CMPB (8-bit compare): fold constant
// operands into CMPBconst (inverting flags when the constant is on the left),
// canonicalize operand order, and merge a single-use memory load into a
// CMPBload. One loop per rule, tried in order.
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int8(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int8(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPB y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBconst rewrites CMPBconst (8-bit compare with
// constant): constant-fold against MOVLconst into the exact flag result,
// derive flags from ANDLconst bounds, turn compares-with-zero into TESTB
// forms, and merge a single-use load into CMPBconstload. One loop per rule,
// tried in order.
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) == y
	// result: (FlagEQ)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) < y && uint8(x) < uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) < y && uint8(x) > uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) > y && uint8(x) < uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x) > y && uint8(x) > uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int8(m) && int8(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTB x y)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPBconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTBconst [int8(c)] x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c), off)] ptr mem)
	for {
		c := auxIntToInt8(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// New value is placed in the load's block so it dominates its uses.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBconstload rewrites CMPBconstload: fold an
// ADDQconst or LEAQ address computation into the instruction's offset/symbol,
// when the combined offset still fits and the symbols can be merged.
func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPBload rewrites CMPBload: fold ADDQconst/LEAQ
// address arithmetic into the offset/symbol, and fold a constant comparand
// into a CMPBconstload.
func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1) + int64(off2))
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (CMPBconstload [makeValAndOff(int32(int8(c)), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPL rewrites CMPL (32-bit compare): fold constant
// operands into CMPLconst (inverting flags when the constant is on the left),
// canonicalize operand order, and merge a single-use memory load into a
// CMPLload. One loop per rule, tried in order.
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPL y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconst rewrites CMPLconst (32-bit compare with
// constant): constant-fold against MOVLconst into the exact flag result,
// derive flags from SHRLconst/ANDLconst value bounds, turn compares-with-zero
// into TESTL forms, and merge a single-use load into CMPLconstload. One loop
// per rule, tried in order.
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x == y
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x < y && uint32(x) < uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x < y && uint32(x) > uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x > y && uint32(x) < uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x > y && uint32(x) > uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	// A right shift by c leaves a value < 2^(32-c), so the compare result is known.
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTL x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPLconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTLconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c, off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// New value is placed in the load's block so it dominates its uses.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLconstload rewrites CMPLconstload: fold an
// ADDQconst or LEAQ address computation into the instruction's offset/symbol,
// when the combined offset still fits and the symbols can be merged.
func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPLload rewrites CMPLload: fold ADDQconst/LEAQ
// address arithmetic into the offset/symbol, and fold a constant comparand
// into a CMPLconstload.
func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1) + int64(off2))
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (CMPLconstload [makeValAndOff(c, off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQ rewrites CMPQ (64-bit compare): fold 32-bit-
// representable constants into CMPQconst (inverting flags when the constant
// is on the left), canonicalize operand order, constant-fold compares of two
// constants into the exact flag result, and merge a single-use load into a
// CMPQload. One loop per rule, tried in order.
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPQ y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x == y
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x < y && uint64(x) < uint64(y)
	// result: (FlagLT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x < y && uint64(x) > uint64(y)
	// result: (FlagLT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x > y && uint64(x) < uint64(y)
	// result: (FlagGT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x > y && uint64(x) > uint64(y)
	// result: (FlagGT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconst rewrites CMPQconst (64-bit compare with
// constant): recognize patterns whose flag result is statically known
// (NEGQ/ADDQconst/ANDQconst shift-count shapes, constant operands,
// zero-extended or masked/shifted values with known bounds), turn compares-
// with-zero into TESTQ forms, and merge a single-use load into
// CMPQconstload. One loop per rule, tried in order.
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
	// result: (FlagLT_ULT)
	// The inner value is in [1,16] after negation, so it is always < 32 unsigned.
	for {
		if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (NEGQ (ADDQconst [-8] (ANDQconst [7] _))) [32])
	// result: (FlagLT_ULT)
	// The inner value is in [1,8] after negation, so it is always < 32 unsigned.
	for {
		if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x == int64(y)
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x == int64(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x < int64(y) && uint64(x) < uint64(int64(y))
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x < int64(y) && uint64(x) > uint64(int64(y))
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x > int64(y) && uint64(x) < uint64(int64(y))
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x > int64(y) && uint64(x) > uint64(int64(y))
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	// A zero-extended byte is at most 0xFF, so the result is known.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	// A zero-extended word is at most 0xFFFF, so the result is known.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	// A right shift by c leaves a value < 2^(64-c), so the result is known.
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst a:(ANDQ x y) [0])
	// cond: a.Uses == 1
	// result: (TESTQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQ {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPQconst a:(ANDQconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTQconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c, off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// New value is placed in the load's block so it dominates its uses.
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQconstload rewrites CMPQconstload: fold an
// ADDQconst or LEAQ address computation into the instruction's offset/symbol,
// when the combined offset still fits and the symbols can be merged.
func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1, sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPQload rewrites CMPQload: fold ADDQconst/LEAQ
// address arithmetic into the offset/symbol, and fold a constant comparand
// (when it fits — validVal) into a CMPQconstload.
func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1) + int64(off2))
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (CMPQconstload [makeValAndOff(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPW rewrites CMPW (16-bit compare): fold constant
// operands into CMPWconst (inverting flags when the constant is on the left),
// canonicalize operand order, and merge a single-use memory load into a
// CMPWload. One loop per rule, tried in order.
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int16(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int16(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int16ToAuxInt(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW x y)
	// cond: canonLessThan(x, y)
	// result: (InvertFlags (CMPW y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWconst rewrites CMPWconst values. Rules are
// tried in order; the first that matches fires and the function returns true.
// NOTE(review): this file appears machine-generated from rewrite rules
// (rulegen style) — keep the code in sync with the generator's output.
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int16(m) && int16(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTW x y)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPWconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTWconst [int16(c)] x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		// Result is materialized in the load's block (@l.Block).
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWconstload folds address arithmetic
// (ADDQconst / LEAQ bases) into the offset of a CMPWconstload.
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPWload folds address arithmetic into the
// offset of a CMPWload, and converts a compare-against-constant into
// CMPWconstload.
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		// Only the low 16 bits of the constant are significant for a word compare.
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPXCHGLlock folds an ADDQconst base into the
// offset of a locked 32-bit compare-and-swap.
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64CMPXCHGQlock folds an ADDQconst base into the
// offset of a locked 64-bit compare-and-swap.
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSD merges a float64 load into the divide,
// producing a memory-operand DIVSDload.
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSDload folds address arithmetic
// (ADDQconst / LEAQ bases) into the offset of a DIVSDload.
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSS merges a float32 load into the divide,
// producing a memory-operand DIVSSload.
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64DIVSSload folds address arithmetic
// (ADDQconst / LEAQ bases) into the offset of a DIVSSload.
func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
8316 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8317 v_1 := v.Args[1]
8318 v_0 := v.Args[0]
8319
8320
8321
8322 for {
8323 x := v_0
8324 y := v_1
8325 if !(!x.rematerializeable() && y.rematerializeable()) {
8326 break
8327 }
8328 v.reset(OpAMD64HMULL)
8329 v.AddArg2(y, x)
8330 return true
8331 }
8332 return false
8333 }
8334 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8335 v_1 := v.Args[1]
8336 v_0 := v.Args[0]
8337
8338
8339
8340 for {
8341 x := v_0
8342 y := v_1
8343 if !(!x.rematerializeable() && y.rematerializeable()) {
8344 break
8345 }
8346 v.reset(OpAMD64HMULLU)
8347 v.AddArg2(y, x)
8348 return true
8349 }
8350 return false
8351 }
8352 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8353 v_1 := v.Args[1]
8354 v_0 := v.Args[0]
8355
8356
8357
8358 for {
8359 x := v_0
8360 y := v_1
8361 if !(!x.rematerializeable() && y.rematerializeable()) {
8362 break
8363 }
8364 v.reset(OpAMD64HMULQ)
8365 v.AddArg2(y, x)
8366 return true
8367 }
8368 return false
8369 }
8370 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8371 v_1 := v.Args[1]
8372 v_0 := v.Args[0]
8373
8374
8375
8376 for {
8377 x := v_0
8378 y := v_1
8379 if !(!x.rematerializeable() && y.rematerializeable()) {
8380 break
8381 }
8382 v.reset(OpAMD64HMULQU)
8383 v.AddArg2(y, x)
8384 return true
8385 }
8386 return false
8387 }
// rewriteValueAMD64_OpAMD64LEAL simplifies LEAL address computations:
// constant folding of the displacement and strength-reduction of an ADDL
// operand into a two-register LEAL1.
func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAL [c] {s} (ADDLconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAL [c] {s} (ADDL x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		// ADDL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL1 simplifies LEAL1 (base + index): folds
// constants into the displacement and absorbs small shifts of the index
// into the scale (LEAL2/LEAL4/LEAL8).
func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		// LEAL1 is commutative in its two address operands.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAL1 [c] {s} x (SHLLconst [3] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL2 simplifies LEAL2 (base + 2*index): folds
// constants into the displacement (index constants are doubled) and absorbs
// shifts of the index into a larger scale (LEAL4/LEAL8).
func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAL2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index is scaled by 2, so its constant contributes 2*d.
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL2 [c] {s} x (SHLLconst [2] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL4 simplifies LEAL4 (base + 4*index): folds
// constants into the displacement (index constants are quadrupled) and
// absorbs a 1-bit shift of the index into LEAL8.
func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAL4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index is scaled by 4, so its constant contributes 4*d.
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL4 [c] {s} x (SHLLconst [1] y))
	// result: (LEAL8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAL8 simplifies LEAL8 (base + 8*index): folds
// constants into the displacement (index constants contribute 8*d).
func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAL8 [c] {s} x (ADDLconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAL8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index is scaled by 8, so its constant contributes 8*d.
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ simplifies LEAQ address computations:
// constant folding, turning an ADDQ operand into LEAQ1, and merging nested
// LEAQ* chains (offsets added, symbols merged).
func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LEAQ [c] {s} (ADDQconst [d] x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAQ [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [c] {s} (ADDQ x y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		// ADDQ is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			x := v_0_0
			y := v_0_1
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg(x)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ2 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ4 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ8 {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ1 simplifies LEAQ1 (base + index): constant
// folding, absorbing index shifts into the scale, merging nested LEAQ/LEAQ1,
// and degrading a symbol-free zero-offset LEAQ1 to a plain ADDQ.
func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		// LEAQ1 is commutative in its two address operands.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			d := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c + d)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ2 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64LEAQ {
				continue
			}
			off2 := auxIntToInt32(v_0.AuxInt)
			sym2 := auxToSym(v_0.Aux)
			x := v_0.Args[0]
			y := v_1
			if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			y := v_1.Args[1]
			// Inner LEAQ1 must have both operands equal: y+y == 2*y.
			if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AuxInt = int32ToAuxInt(off1 + off2)
			v.Aux = symToAux(mergeSym(sym1, sym2))
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} y x)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ1 {
				continue
			}
			off2 := auxIntToInt32(v_1.AuxInt)
			sym2 := auxToSym(v_1.Aux)
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
					continue
				}
				v.reset(OpAMD64LEAQ2)
				v.AuxInt = int32ToAuxInt(off1 + off2)
				v.Aux = symToAux(mergeSym(sym1, sym2))
				// x + (x + y) == y + 2*x, hence base y, index x.
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (LEAQ1 [0] x y)
	// cond: v.Aux == nil
	// result: (ADDQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		y := v_1
		if !(v.Aux == nil) {
			break
		}
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ2 simplifies LEAQ2 (base + 2*index): constant
// folding (index constants doubled), absorbing shifts into larger scales,
// merging nested LEAQ/LEAQ1, and folding a constant index away entirely.
func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
	// result: (LEAQ2 [c+2*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index is scaled by 2, so its constant contributes 2*d.
		if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(c + 2*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ4 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
	// result: (LEAQ4 [off1+2*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		// Inner LEAQ1 must have both operands equal (y+y == 2*y) and no symbol.
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + 2*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*2)
	// result: (LEAQ [off+int32(scale)*2] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*2)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ4 simplifies LEAQ4 (base + 4*index): constant
// folding (index constants quadrupled), absorbing a 1-bit shift into LEAQ8,
// merging nested LEAQ/LEAQ1, and folding a constant index away entirely.
func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
	// result: (LEAQ4 [c+4*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		// The index is scaled by 4, so its constant contributes 4*d.
		if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(c + 4*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
	// result: (LEAQ8 [c] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
	// cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
	// result: (LEAQ8 [off1+4*off2] {sym1} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64LEAQ1 {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		y := v_1.Args[1]
		// Inner LEAQ1 must have both operands equal (y+y == 2*y) and no symbol.
		if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + 4*off2)
		v.Aux = symToAux(sym1)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*4)
	// result: (LEAQ [off+int32(scale)*4] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*4)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64LEAQ8 applies address-folding rewrite rules to a
// LEAQ8 value (x + 8*y + const). It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
	// cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
	// result: (LEAQ8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
	// cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
	// result: (LEAQ8 [c+8*d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		d := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(c + 8*d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
	// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		y := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(x, y)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		scale := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	// match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
	// cond: is32Bit(int64(off)+int64(scale)*8)
	// result: (LEAQ [off+int32(scale)*8] {sym} x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		scale := auxIntToInt32(v_1.AuxInt)
		if !(is32Bit(int64(off) + int64(scale)*8)) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
		v.Aux = symToAux(sym)
		v.AddArg(x)
		return true
	}
	return false
}
9476 func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
9477 v_2 := v.Args[2]
9478 v_1 := v.Args[1]
9479 v_0 := v.Args[0]
9480
9481
9482 for {
9483 i := auxIntToInt32(v.AuxInt)
9484 s := auxToSym(v.Aux)
9485 p := v_0
9486 if v_1.Op != OpAMD64BSWAPL {
9487 break
9488 }
9489 x := v_1.Args[0]
9490 m := v_2
9491 v.reset(OpAMD64MOVLstore)
9492 v.AuxInt = int32ToAuxInt(i)
9493 v.Aux = symToAux(s)
9494 v.AddArg3(p, x, m)
9495 return true
9496 }
9497 return false
9498 }
9499 func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
9500 v_2 := v.Args[2]
9501 v_1 := v.Args[1]
9502 v_0 := v.Args[0]
9503
9504
9505 for {
9506 i := auxIntToInt32(v.AuxInt)
9507 s := auxToSym(v.Aux)
9508 p := v_0
9509 if v_1.Op != OpAMD64BSWAPQ {
9510 break
9511 }
9512 x := v_1.Args[0]
9513 m := v_2
9514 v.reset(OpAMD64MOVQstore)
9515 v.AuxInt = int32ToAuxInt(i)
9516 v.Aux = symToAux(s)
9517 v.AddArg3(p, x, m)
9518 return true
9519 }
9520 return false
9521 }
// rewriteValueAMD64_OpAMD64MOVBQSX applies rewrite rules to a MOVBQSX
// (sign-extend byte to quadword) value, folding loads and redundant
// extensions. It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQSX (ANDLconst [c] x))
	// cond: c & 0x80 == 0
	// result: (ANDLconst [c & 0x7f] x)
	// (the sign bit of the byte is known clear, so the extension is a no-op)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x80 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7f)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
9647 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
9648 v_1 := v.Args[1]
9649 v_0 := v.Args[0]
9650
9651
9652
9653 for {
9654 off := auxIntToInt32(v.AuxInt)
9655 sym := auxToSym(v.Aux)
9656 ptr := v_0
9657 if v_1.Op != OpAMD64MOVBstore {
9658 break
9659 }
9660 off2 := auxIntToInt32(v_1.AuxInt)
9661 sym2 := auxToSym(v_1.Aux)
9662 x := v_1.Args[1]
9663 ptr2 := v_1.Args[0]
9664 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9665 break
9666 }
9667 v.reset(OpAMD64MOVBQSX)
9668 v.AddArg(x)
9669 return true
9670 }
9671
9672
9673
9674 for {
9675 off1 := auxIntToInt32(v.AuxInt)
9676 sym1 := auxToSym(v.Aux)
9677 if v_0.Op != OpAMD64LEAQ {
9678 break
9679 }
9680 off2 := auxIntToInt32(v_0.AuxInt)
9681 sym2 := auxToSym(v_0.Aux)
9682 base := v_0.Args[0]
9683 mem := v_1
9684 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9685 break
9686 }
9687 v.reset(OpAMD64MOVBQSXload)
9688 v.AuxInt = int32ToAuxInt(off1 + off2)
9689 v.Aux = symToAux(mergeSym(sym1, sym2))
9690 v.AddArg2(base, mem)
9691 return true
9692 }
9693 return false
9694 }
// rewriteValueAMD64_OpAMD64MOVBQZX applies rewrite rules to a MOVBQZX
// (zero-extend byte to quadword) value, folding loads, masks, and redundant
// extensions. It returns true if v was rewritten.
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBQZX x)
	// cond: zeroUpper56Bits(x, 3)
	// result: x
	// (x already has its upper 56 bits clear, so the extension is a no-op)
	for {
		x := v_0
		if !(zeroUpper56Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVBQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xff)
		v.AddArg(x)
		return true
	}
	// match: (MOVBQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
9827 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
9828 v_1 := v.Args[1]
9829 v_0 := v.Args[0]
9830
9831
9832
9833 for {
9834 off1 := auxIntToInt32(v.AuxInt)
9835 sym := auxToSym(v.Aux)
9836 if v_0.Op != OpAMD64ADDQconst {
9837 break
9838 }
9839 off2 := auxIntToInt32(v_0.AuxInt)
9840 ptr := v_0.Args[0]
9841 mem := v_1
9842 if !(is32Bit(int64(off1) + int64(off2))) {
9843 break
9844 }
9845 v.reset(OpAMD64MOVBatomicload)
9846 v.AuxInt = int32ToAuxInt(off1 + off2)
9847 v.Aux = symToAux(sym)
9848 v.AddArg2(ptr, mem)
9849 return true
9850 }
9851
9852
9853
9854 for {
9855 off1 := auxIntToInt32(v.AuxInt)
9856 sym1 := auxToSym(v.Aux)
9857 if v_0.Op != OpAMD64LEAQ {
9858 break
9859 }
9860 off2 := auxIntToInt32(v_0.AuxInt)
9861 sym2 := auxToSym(v_0.Aux)
9862 ptr := v_0.Args[0]
9863 mem := v_1
9864 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9865 break
9866 }
9867 v.reset(OpAMD64MOVBatomicload)
9868 v.AuxInt = int32ToAuxInt(off1 + off2)
9869 v.Aux = symToAux(mergeSym(sym1, sym2))
9870 v.AddArg2(ptr, mem)
9871 return true
9872 }
9873 return false
9874 }
// rewriteValueAMD64_OpAMD64MOVBload applies rewrite rules to a MOVBload
// (byte load): store-to-load forwarding, address folding, and constant
// folding of loads from read-only symbols. It returns true if v was
// rewritten.
func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVBQZX x)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVBstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVBload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVBload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read8(sym, int64(off)))])
	// (load from a read-only symbol folds to a constant)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
		return true
	}
	return false
}
9957 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
9958 v_2 := v.Args[2]
9959 v_1 := v.Args[1]
9960 v_0 := v.Args[0]
9961 b := v.Block
9962 typ := &b.Func.Config.Types
9963
9964
9965
9966 for {
9967 off := auxIntToInt32(v.AuxInt)
9968 sym := auxToSym(v.Aux)
9969 ptr := v_0
9970 y := v_1
9971 if y.Op != OpAMD64SETL {
9972 break
9973 }
9974 x := y.Args[0]
9975 mem := v_2
9976 if !(y.Uses == 1) {
9977 break
9978 }
9979 v.reset(OpAMD64SETLstore)
9980 v.AuxInt = int32ToAuxInt(off)
9981 v.Aux = symToAux(sym)
9982 v.AddArg3(ptr, x, mem)
9983 return true
9984 }
9985
9986
9987
9988 for {
9989 off := auxIntToInt32(v.AuxInt)
9990 sym := auxToSym(v.Aux)
9991 ptr := v_0
9992 y := v_1
9993 if y.Op != OpAMD64SETLE {
9994 break
9995 }
9996 x := y.Args[0]
9997 mem := v_2
9998 if !(y.Uses == 1) {
9999 break
10000 }
10001 v.reset(OpAMD64SETLEstore)
10002 v.AuxInt = int32ToAuxInt(off)
10003 v.Aux = symToAux(sym)
10004 v.AddArg3(ptr, x, mem)
10005 return true
10006 }
10007
10008
10009
10010 for {
10011 off := auxIntToInt32(v.AuxInt)
10012 sym := auxToSym(v.Aux)
10013 ptr := v_0
10014 y := v_1
10015 if y.Op != OpAMD64SETG {
10016 break
10017 }
10018 x := y.Args[0]
10019 mem := v_2
10020 if !(y.Uses == 1) {
10021 break
10022 }
10023 v.reset(OpAMD64SETGstore)
10024 v.AuxInt = int32ToAuxInt(off)
10025 v.Aux = symToAux(sym)
10026 v.AddArg3(ptr, x, mem)
10027 return true
10028 }
10029
10030
10031
10032 for {
10033 off := auxIntToInt32(v.AuxInt)
10034 sym := auxToSym(v.Aux)
10035 ptr := v_0
10036 y := v_1
10037 if y.Op != OpAMD64SETGE {
10038 break
10039 }
10040 x := y.Args[0]
10041 mem := v_2
10042 if !(y.Uses == 1) {
10043 break
10044 }
10045 v.reset(OpAMD64SETGEstore)
10046 v.AuxInt = int32ToAuxInt(off)
10047 v.Aux = symToAux(sym)
10048 v.AddArg3(ptr, x, mem)
10049 return true
10050 }
10051
10052
10053
10054 for {
10055 off := auxIntToInt32(v.AuxInt)
10056 sym := auxToSym(v.Aux)
10057 ptr := v_0
10058 y := v_1
10059 if y.Op != OpAMD64SETEQ {
10060 break
10061 }
10062 x := y.Args[0]
10063 mem := v_2
10064 if !(y.Uses == 1) {
10065 break
10066 }
10067 v.reset(OpAMD64SETEQstore)
10068 v.AuxInt = int32ToAuxInt(off)
10069 v.Aux = symToAux(sym)
10070 v.AddArg3(ptr, x, mem)
10071 return true
10072 }
10073
10074
10075
10076 for {
10077 off := auxIntToInt32(v.AuxInt)
10078 sym := auxToSym(v.Aux)
10079 ptr := v_0
10080 y := v_1
10081 if y.Op != OpAMD64SETNE {
10082 break
10083 }
10084 x := y.Args[0]
10085 mem := v_2
10086 if !(y.Uses == 1) {
10087 break
10088 }
10089 v.reset(OpAMD64SETNEstore)
10090 v.AuxInt = int32ToAuxInt(off)
10091 v.Aux = symToAux(sym)
10092 v.AddArg3(ptr, x, mem)
10093 return true
10094 }
10095
10096
10097
10098 for {
10099 off := auxIntToInt32(v.AuxInt)
10100 sym := auxToSym(v.Aux)
10101 ptr := v_0
10102 y := v_1
10103 if y.Op != OpAMD64SETB {
10104 break
10105 }
10106 x := y.Args[0]
10107 mem := v_2
10108 if !(y.Uses == 1) {
10109 break
10110 }
10111 v.reset(OpAMD64SETBstore)
10112 v.AuxInt = int32ToAuxInt(off)
10113 v.Aux = symToAux(sym)
10114 v.AddArg3(ptr, x, mem)
10115 return true
10116 }
10117
10118
10119
10120 for {
10121 off := auxIntToInt32(v.AuxInt)
10122 sym := auxToSym(v.Aux)
10123 ptr := v_0
10124 y := v_1
10125 if y.Op != OpAMD64SETBE {
10126 break
10127 }
10128 x := y.Args[0]
10129 mem := v_2
10130 if !(y.Uses == 1) {
10131 break
10132 }
10133 v.reset(OpAMD64SETBEstore)
10134 v.AuxInt = int32ToAuxInt(off)
10135 v.Aux = symToAux(sym)
10136 v.AddArg3(ptr, x, mem)
10137 return true
10138 }
10139
10140
10141
10142 for {
10143 off := auxIntToInt32(v.AuxInt)
10144 sym := auxToSym(v.Aux)
10145 ptr := v_0
10146 y := v_1
10147 if y.Op != OpAMD64SETA {
10148 break
10149 }
10150 x := y.Args[0]
10151 mem := v_2
10152 if !(y.Uses == 1) {
10153 break
10154 }
10155 v.reset(OpAMD64SETAstore)
10156 v.AuxInt = int32ToAuxInt(off)
10157 v.Aux = symToAux(sym)
10158 v.AddArg3(ptr, x, mem)
10159 return true
10160 }
10161
10162
10163
10164 for {
10165 off := auxIntToInt32(v.AuxInt)
10166 sym := auxToSym(v.Aux)
10167 ptr := v_0
10168 y := v_1
10169 if y.Op != OpAMD64SETAE {
10170 break
10171 }
10172 x := y.Args[0]
10173 mem := v_2
10174 if !(y.Uses == 1) {
10175 break
10176 }
10177 v.reset(OpAMD64SETAEstore)
10178 v.AuxInt = int32ToAuxInt(off)
10179 v.Aux = symToAux(sym)
10180 v.AddArg3(ptr, x, mem)
10181 return true
10182 }
10183
10184
10185 for {
10186 off := auxIntToInt32(v.AuxInt)
10187 sym := auxToSym(v.Aux)
10188 ptr := v_0
10189 if v_1.Op != OpAMD64MOVBQSX {
10190 break
10191 }
10192 x := v_1.Args[0]
10193 mem := v_2
10194 v.reset(OpAMD64MOVBstore)
10195 v.AuxInt = int32ToAuxInt(off)
10196 v.Aux = symToAux(sym)
10197 v.AddArg3(ptr, x, mem)
10198 return true
10199 }
10200
10201
10202 for {
10203 off := auxIntToInt32(v.AuxInt)
10204 sym := auxToSym(v.Aux)
10205 ptr := v_0
10206 if v_1.Op != OpAMD64MOVBQZX {
10207 break
10208 }
10209 x := v_1.Args[0]
10210 mem := v_2
10211 v.reset(OpAMD64MOVBstore)
10212 v.AuxInt = int32ToAuxInt(off)
10213 v.Aux = symToAux(sym)
10214 v.AddArg3(ptr, x, mem)
10215 return true
10216 }
10217
10218
10219
10220 for {
10221 off1 := auxIntToInt32(v.AuxInt)
10222 sym := auxToSym(v.Aux)
10223 if v_0.Op != OpAMD64ADDQconst {
10224 break
10225 }
10226 off2 := auxIntToInt32(v_0.AuxInt)
10227 ptr := v_0.Args[0]
10228 val := v_1
10229 mem := v_2
10230 if !(is32Bit(int64(off1) + int64(off2))) {
10231 break
10232 }
10233 v.reset(OpAMD64MOVBstore)
10234 v.AuxInt = int32ToAuxInt(off1 + off2)
10235 v.Aux = symToAux(sym)
10236 v.AddArg3(ptr, val, mem)
10237 return true
10238 }
10239
10240
10241 for {
10242 off := auxIntToInt32(v.AuxInt)
10243 sym := auxToSym(v.Aux)
10244 ptr := v_0
10245 if v_1.Op != OpAMD64MOVLconst {
10246 break
10247 }
10248 c := auxIntToInt32(v_1.AuxInt)
10249 mem := v_2
10250 v.reset(OpAMD64MOVBstoreconst)
10251 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10252 v.Aux = symToAux(sym)
10253 v.AddArg2(ptr, mem)
10254 return true
10255 }
10256
10257
10258 for {
10259 off := auxIntToInt32(v.AuxInt)
10260 sym := auxToSym(v.Aux)
10261 ptr := v_0
10262 if v_1.Op != OpAMD64MOVQconst {
10263 break
10264 }
10265 c := auxIntToInt64(v_1.AuxInt)
10266 mem := v_2
10267 v.reset(OpAMD64MOVBstoreconst)
10268 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10269 v.Aux = symToAux(sym)
10270 v.AddArg2(ptr, mem)
10271 return true
10272 }
10273
10274
10275
10276 for {
10277 off1 := auxIntToInt32(v.AuxInt)
10278 sym1 := auxToSym(v.Aux)
10279 if v_0.Op != OpAMD64LEAQ {
10280 break
10281 }
10282 off2 := auxIntToInt32(v_0.AuxInt)
10283 sym2 := auxToSym(v_0.Aux)
10284 base := v_0.Args[0]
10285 val := v_1
10286 mem := v_2
10287 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10288 break
10289 }
10290 v.reset(OpAMD64MOVBstore)
10291 v.AuxInt = int32ToAuxInt(off1 + off2)
10292 v.Aux = symToAux(mergeSym(sym1, sym2))
10293 v.AddArg3(base, val, mem)
10294 return true
10295 }
10296
10297
10298
10299 for {
10300 i := auxIntToInt32(v.AuxInt)
10301 s := auxToSym(v.Aux)
10302 p := v_0
10303 w := v_1
10304 x0 := v_2
10305 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
10306 break
10307 }
10308 mem := x0.Args[2]
10309 if p != x0.Args[0] {
10310 break
10311 }
10312 x0_1 := x0.Args[1]
10313 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
10314 break
10315 }
10316 v.reset(OpAMD64MOVWstore)
10317 v.AuxInt = int32ToAuxInt(i - 1)
10318 v.Aux = symToAux(s)
10319 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10320 v0.AuxInt = int8ToAuxInt(8)
10321 v0.AddArg(w)
10322 v.AddArg3(p, v0, mem)
10323 return true
10324 }
10325
10326
10327
10328 for {
10329 i := auxIntToInt32(v.AuxInt)
10330 s := auxToSym(v.Aux)
10331 p1 := v_0
10332 w := v_1
10333 x0 := v_2
10334 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10335 break
10336 }
10337 mem := x0.Args[2]
10338 p0 := x0.Args[0]
10339 x0_1 := x0.Args[1]
10340 if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
10341 break
10342 }
10343 v.reset(OpAMD64MOVWstore)
10344 v.AuxInt = int32ToAuxInt(i)
10345 v.Aux = symToAux(s)
10346 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
10347 v0.AuxInt = int8ToAuxInt(8)
10348 v0.AddArg(w)
10349 v.AddArg3(p0, v0, mem)
10350 return true
10351 }
10352
10353
10354
10355 for {
10356 i := auxIntToInt32(v.AuxInt)
10357 s := auxToSym(v.Aux)
10358 p := v_0
10359 w := v_1
10360 x2 := v_2
10361 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
10362 break
10363 }
10364 _ = x2.Args[2]
10365 if p != x2.Args[0] {
10366 break
10367 }
10368 x2_1 := x2.Args[1]
10369 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10370 break
10371 }
10372 x1 := x2.Args[2]
10373 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
10374 break
10375 }
10376 _ = x1.Args[2]
10377 if p != x1.Args[0] {
10378 break
10379 }
10380 x1_1 := x1.Args[1]
10381 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10382 break
10383 }
10384 x0 := x1.Args[2]
10385 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
10386 break
10387 }
10388 mem := x0.Args[2]
10389 if p != x0.Args[0] {
10390 break
10391 }
10392 x0_1 := x0.Args[1]
10393 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
10394 break
10395 }
10396 v.reset(OpAMD64MOVLstore)
10397 v.AuxInt = int32ToAuxInt(i - 3)
10398 v.Aux = symToAux(s)
10399 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10400 v0.AddArg(w)
10401 v.AddArg3(p, v0, mem)
10402 return true
10403 }
10404
10405
10406
10407 for {
10408 i := auxIntToInt32(v.AuxInt)
10409 s := auxToSym(v.Aux)
10410 p3 := v_0
10411 w := v_1
10412 x2 := v_2
10413 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10414 break
10415 }
10416 _ = x2.Args[2]
10417 p2 := x2.Args[0]
10418 x2_1 := x2.Args[1]
10419 if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
10420 break
10421 }
10422 x1 := x2.Args[2]
10423 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10424 break
10425 }
10426 _ = x1.Args[2]
10427 p1 := x1.Args[0]
10428 x1_1 := x1.Args[1]
10429 if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
10430 break
10431 }
10432 x0 := x1.Args[2]
10433 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10434 break
10435 }
10436 mem := x0.Args[2]
10437 p0 := x0.Args[0]
10438 x0_1 := x0.Args[1]
10439 if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
10440 break
10441 }
10442 v.reset(OpAMD64MOVLstore)
10443 v.AuxInt = int32ToAuxInt(i)
10444 v.Aux = symToAux(s)
10445 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
10446 v0.AddArg(w)
10447 v.AddArg3(p0, v0, mem)
10448 return true
10449 }
10450
10451
10452
10453 for {
10454 i := auxIntToInt32(v.AuxInt)
10455 s := auxToSym(v.Aux)
10456 p := v_0
10457 w := v_1
10458 x6 := v_2
10459 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
10460 break
10461 }
10462 _ = x6.Args[2]
10463 if p != x6.Args[0] {
10464 break
10465 }
10466 x6_1 := x6.Args[1]
10467 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10468 break
10469 }
10470 x5 := x6.Args[2]
10471 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
10472 break
10473 }
10474 _ = x5.Args[2]
10475 if p != x5.Args[0] {
10476 break
10477 }
10478 x5_1 := x5.Args[1]
10479 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10480 break
10481 }
10482 x4 := x5.Args[2]
10483 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
10484 break
10485 }
10486 _ = x4.Args[2]
10487 if p != x4.Args[0] {
10488 break
10489 }
10490 x4_1 := x4.Args[1]
10491 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10492 break
10493 }
10494 x3 := x4.Args[2]
10495 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
10496 break
10497 }
10498 _ = x3.Args[2]
10499 if p != x3.Args[0] {
10500 break
10501 }
10502 x3_1 := x3.Args[1]
10503 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10504 break
10505 }
10506 x2 := x3.Args[2]
10507 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
10508 break
10509 }
10510 _ = x2.Args[2]
10511 if p != x2.Args[0] {
10512 break
10513 }
10514 x2_1 := x2.Args[1]
10515 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10516 break
10517 }
10518 x1 := x2.Args[2]
10519 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
10520 break
10521 }
10522 _ = x1.Args[2]
10523 if p != x1.Args[0] {
10524 break
10525 }
10526 x1_1 := x1.Args[1]
10527 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10528 break
10529 }
10530 x0 := x1.Args[2]
10531 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
10532 break
10533 }
10534 mem := x0.Args[2]
10535 if p != x0.Args[0] {
10536 break
10537 }
10538 x0_1 := x0.Args[1]
10539 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10540 break
10541 }
10542 v.reset(OpAMD64MOVQstore)
10543 v.AuxInt = int32ToAuxInt(i - 7)
10544 v.Aux = symToAux(s)
10545 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10546 v0.AddArg(w)
10547 v.AddArg3(p, v0, mem)
10548 return true
10549 }
10550
10551
10552
10553 for {
10554 i := auxIntToInt32(v.AuxInt)
10555 s := auxToSym(v.Aux)
10556 p7 := v_0
10557 w := v_1
10558 x6 := v_2
10559 if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
10560 break
10561 }
10562 _ = x6.Args[2]
10563 p6 := x6.Args[0]
10564 x6_1 := x6.Args[1]
10565 if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
10566 break
10567 }
10568 x5 := x6.Args[2]
10569 if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
10570 break
10571 }
10572 _ = x5.Args[2]
10573 p5 := x5.Args[0]
10574 x5_1 := x5.Args[1]
10575 if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
10576 break
10577 }
10578 x4 := x5.Args[2]
10579 if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
10580 break
10581 }
10582 _ = x4.Args[2]
10583 p4 := x4.Args[0]
10584 x4_1 := x4.Args[1]
10585 if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
10586 break
10587 }
10588 x3 := x4.Args[2]
10589 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
10590 break
10591 }
10592 _ = x3.Args[2]
10593 p3 := x3.Args[0]
10594 x3_1 := x3.Args[1]
10595 if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
10596 break
10597 }
10598 x2 := x3.Args[2]
10599 if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
10600 break
10601 }
10602 _ = x2.Args[2]
10603 p2 := x2.Args[0]
10604 x2_1 := x2.Args[1]
10605 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
10606 break
10607 }
10608 x1 := x2.Args[2]
10609 if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
10610 break
10611 }
10612 _ = x1.Args[2]
10613 p1 := x1.Args[0]
10614 x1_1 := x1.Args[1]
10615 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
10616 break
10617 }
10618 x0 := x1.Args[2]
10619 if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
10620 break
10621 }
10622 mem := x0.Args[2]
10623 p0 := x0.Args[0]
10624 x0_1 := x0.Args[1]
10625 if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
10626 break
10627 }
10628 v.reset(OpAMD64MOVQstore)
10629 v.AuxInt = int32ToAuxInt(i)
10630 v.Aux = symToAux(s)
10631 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
10632 v0.AddArg(w)
10633 v.AddArg3(p0, v0, mem)
10634 return true
10635 }
10636
10637
10638
10639 for {
10640 i := auxIntToInt32(v.AuxInt)
10641 s := auxToSym(v.Aux)
10642 p := v_0
10643 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
10644 break
10645 }
10646 w := v_1.Args[0]
10647 x := v_2
10648 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10649 break
10650 }
10651 mem := x.Args[2]
10652 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10653 break
10654 }
10655 v.reset(OpAMD64MOVWstore)
10656 v.AuxInt = int32ToAuxInt(i - 1)
10657 v.Aux = symToAux(s)
10658 v.AddArg3(p, w, mem)
10659 return true
10660 }
10661
10662
10663
10664 for {
10665 i := auxIntToInt32(v.AuxInt)
10666 s := auxToSym(v.Aux)
10667 p := v_0
10668 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
10669 break
10670 }
10671 w := v_1.Args[0]
10672 x := v_2
10673 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10674 break
10675 }
10676 mem := x.Args[2]
10677 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10678 break
10679 }
10680 v.reset(OpAMD64MOVWstore)
10681 v.AuxInt = int32ToAuxInt(i - 1)
10682 v.Aux = symToAux(s)
10683 v.AddArg3(p, w, mem)
10684 return true
10685 }
10686
10687
10688
10689 for {
10690 i := auxIntToInt32(v.AuxInt)
10691 s := auxToSym(v.Aux)
10692 p := v_0
10693 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
10694 break
10695 }
10696 w := v_1.Args[0]
10697 x := v_2
10698 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10699 break
10700 }
10701 mem := x.Args[2]
10702 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
10703 break
10704 }
10705 v.reset(OpAMD64MOVWstore)
10706 v.AuxInt = int32ToAuxInt(i - 1)
10707 v.Aux = symToAux(s)
10708 v.AddArg3(p, w, mem)
10709 return true
10710 }
10711
10712
10713
10714 for {
10715 i := auxIntToInt32(v.AuxInt)
10716 s := auxToSym(v.Aux)
10717 p := v_0
10718 w := v_1
10719 x := v_2
10720 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
10721 break
10722 }
10723 mem := x.Args[2]
10724 if p != x.Args[0] {
10725 break
10726 }
10727 x_1 := x.Args[1]
10728 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
10729 break
10730 }
10731 v.reset(OpAMD64MOVWstore)
10732 v.AuxInt = int32ToAuxInt(i)
10733 v.Aux = symToAux(s)
10734 v.AddArg3(p, w, mem)
10735 return true
10736 }
10737
10738
10739
10740 for {
10741 i := auxIntToInt32(v.AuxInt)
10742 s := auxToSym(v.Aux)
10743 p := v_0
10744 w := v_1
10745 x := v_2
10746 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
10747 break
10748 }
10749 mem := x.Args[2]
10750 if p != x.Args[0] {
10751 break
10752 }
10753 x_1 := x.Args[1]
10754 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
10755 break
10756 }
10757 v.reset(OpAMD64MOVWstore)
10758 v.AuxInt = int32ToAuxInt(i)
10759 v.Aux = symToAux(s)
10760 v.AddArg3(p, w, mem)
10761 return true
10762 }
10763
10764
10765
10766 for {
10767 i := auxIntToInt32(v.AuxInt)
10768 s := auxToSym(v.Aux)
10769 p := v_0
10770 w := v_1
10771 x := v_2
10772 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
10773 break
10774 }
10775 mem := x.Args[2]
10776 if p != x.Args[0] {
10777 break
10778 }
10779 x_1 := x.Args[1]
10780 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
10781 break
10782 }
10783 v.reset(OpAMD64MOVWstore)
10784 v.AuxInt = int32ToAuxInt(i)
10785 v.Aux = symToAux(s)
10786 v.AddArg3(p, w, mem)
10787 return true
10788 }
10789
10790
10791
10792 for {
10793 i := auxIntToInt32(v.AuxInt)
10794 s := auxToSym(v.Aux)
10795 p := v_0
10796 if v_1.Op != OpAMD64SHRLconst {
10797 break
10798 }
10799 j := auxIntToInt8(v_1.AuxInt)
10800 w := v_1.Args[0]
10801 x := v_2
10802 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10803 break
10804 }
10805 mem := x.Args[2]
10806 if p != x.Args[0] {
10807 break
10808 }
10809 w0 := x.Args[1]
10810 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
10811 break
10812 }
10813 v.reset(OpAMD64MOVWstore)
10814 v.AuxInt = int32ToAuxInt(i - 1)
10815 v.Aux = symToAux(s)
10816 v.AddArg3(p, w0, mem)
10817 return true
10818 }
10819
10820
10821
10822 for {
10823 i := auxIntToInt32(v.AuxInt)
10824 s := auxToSym(v.Aux)
10825 p := v_0
10826 if v_1.Op != OpAMD64SHRQconst {
10827 break
10828 }
10829 j := auxIntToInt8(v_1.AuxInt)
10830 w := v_1.Args[0]
10831 x := v_2
10832 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
10833 break
10834 }
10835 mem := x.Args[2]
10836 if p != x.Args[0] {
10837 break
10838 }
10839 w0 := x.Args[1]
10840 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
10841 break
10842 }
10843 v.reset(OpAMD64MOVWstore)
10844 v.AuxInt = int32ToAuxInt(i - 1)
10845 v.Aux = symToAux(s)
10846 v.AddArg3(p, w0, mem)
10847 return true
10848 }
10849
10850
10851
10852 for {
10853 i := auxIntToInt32(v.AuxInt)
10854 s := auxToSym(v.Aux)
10855 p1 := v_0
10856 if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
10857 break
10858 }
10859 w := v_1.Args[0]
10860 x := v_2
10861 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10862 break
10863 }
10864 mem := x.Args[2]
10865 p0 := x.Args[0]
10866 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10867 break
10868 }
10869 v.reset(OpAMD64MOVWstore)
10870 v.AuxInt = int32ToAuxInt(i)
10871 v.Aux = symToAux(s)
10872 v.AddArg3(p0, w, mem)
10873 return true
10874 }
10875
10876
10877
10878 for {
10879 i := auxIntToInt32(v.AuxInt)
10880 s := auxToSym(v.Aux)
10881 p1 := v_0
10882 if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
10883 break
10884 }
10885 w := v_1.Args[0]
10886 x := v_2
10887 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10888 break
10889 }
10890 mem := x.Args[2]
10891 p0 := x.Args[0]
10892 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10893 break
10894 }
10895 v.reset(OpAMD64MOVWstore)
10896 v.AuxInt = int32ToAuxInt(i)
10897 v.Aux = symToAux(s)
10898 v.AddArg3(p0, w, mem)
10899 return true
10900 }
10901
10902
10903
10904 for {
10905 i := auxIntToInt32(v.AuxInt)
10906 s := auxToSym(v.Aux)
10907 p1 := v_0
10908 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
10909 break
10910 }
10911 w := v_1.Args[0]
10912 x := v_2
10913 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10914 break
10915 }
10916 mem := x.Args[2]
10917 p0 := x.Args[0]
10918 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10919 break
10920 }
10921 v.reset(OpAMD64MOVWstore)
10922 v.AuxInt = int32ToAuxInt(i)
10923 v.Aux = symToAux(s)
10924 v.AddArg3(p0, w, mem)
10925 return true
10926 }
10927
10928
10929
10930 for {
10931 i := auxIntToInt32(v.AuxInt)
10932 s := auxToSym(v.Aux)
10933 p0 := v_0
10934 w := v_1
10935 x := v_2
10936 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10937 break
10938 }
10939 mem := x.Args[2]
10940 p1 := x.Args[0]
10941 x_1 := x.Args[1]
10942 if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10943 break
10944 }
10945 v.reset(OpAMD64MOVWstore)
10946 v.AuxInt = int32ToAuxInt(i)
10947 v.Aux = symToAux(s)
10948 v.AddArg3(p0, w, mem)
10949 return true
10950 }
10951
10952
10953
10954 for {
10955 i := auxIntToInt32(v.AuxInt)
10956 s := auxToSym(v.Aux)
10957 p0 := v_0
10958 w := v_1
10959 x := v_2
10960 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10961 break
10962 }
10963 mem := x.Args[2]
10964 p1 := x.Args[0]
10965 x_1 := x.Args[1]
10966 if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10967 break
10968 }
10969 v.reset(OpAMD64MOVWstore)
10970 v.AuxInt = int32ToAuxInt(i)
10971 v.Aux = symToAux(s)
10972 v.AddArg3(p0, w, mem)
10973 return true
10974 }
10975
10976
10977
10978 for {
10979 i := auxIntToInt32(v.AuxInt)
10980 s := auxToSym(v.Aux)
10981 p0 := v_0
10982 w := v_1
10983 x := v_2
10984 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
10985 break
10986 }
10987 mem := x.Args[2]
10988 p1 := x.Args[0]
10989 x_1 := x.Args[1]
10990 if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
10991 break
10992 }
10993 v.reset(OpAMD64MOVWstore)
10994 v.AuxInt = int32ToAuxInt(i)
10995 v.Aux = symToAux(s)
10996 v.AddArg3(p0, w, mem)
10997 return true
10998 }
10999
11000
11001
11002 for {
11003 i := auxIntToInt32(v.AuxInt)
11004 s := auxToSym(v.Aux)
11005 p1 := v_0
11006 if v_1.Op != OpAMD64SHRLconst {
11007 break
11008 }
11009 j := auxIntToInt8(v_1.AuxInt)
11010 w := v_1.Args[0]
11011 x := v_2
11012 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11013 break
11014 }
11015 mem := x.Args[2]
11016 p0 := x.Args[0]
11017 w0 := x.Args[1]
11018 if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11019 break
11020 }
11021 v.reset(OpAMD64MOVWstore)
11022 v.AuxInt = int32ToAuxInt(i)
11023 v.Aux = symToAux(s)
11024 v.AddArg3(p0, w0, mem)
11025 return true
11026 }
11027
11028
11029
11030 for {
11031 i := auxIntToInt32(v.AuxInt)
11032 s := auxToSym(v.Aux)
11033 p1 := v_0
11034 if v_1.Op != OpAMD64SHRQconst {
11035 break
11036 }
11037 j := auxIntToInt8(v_1.AuxInt)
11038 w := v_1.Args[0]
11039 x := v_2
11040 if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11041 break
11042 }
11043 mem := x.Args[2]
11044 p0 := x.Args[0]
11045 w0 := x.Args[1]
11046 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
11047 break
11048 }
11049 v.reset(OpAMD64MOVWstore)
11050 v.AuxInt = int32ToAuxInt(i)
11051 v.Aux = symToAux(s)
11052 v.AddArg3(p0, w0, mem)
11053 return true
11054 }
11055
11056
11057
11058 for {
11059 if auxIntToInt32(v.AuxInt) != 7 {
11060 break
11061 }
11062 s := auxToSym(v.Aux)
11063 p1 := v_0
11064 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
11065 break
11066 }
11067 w := v_1.Args[0]
11068 x1 := v_2
11069 if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
11070 break
11071 }
11072 _ = x1.Args[2]
11073 if p1 != x1.Args[0] {
11074 break
11075 }
11076 x1_1 := x1.Args[1]
11077 if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
11078 break
11079 }
11080 x2 := x1.Args[2]
11081 if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
11082 break
11083 }
11084 _ = x2.Args[2]
11085 if p1 != x2.Args[0] {
11086 break
11087 }
11088 x2_1 := x2.Args[1]
11089 if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
11090 break
11091 }
11092 x3 := x2.Args[2]
11093 if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
11094 break
11095 }
11096 mem := x3.Args[2]
11097 if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
11098 break
11099 }
11100 v.reset(OpAMD64MOVQstore)
11101 v.Aux = symToAux(s)
11102 v.AddArg3(p1, w, mem)
11103 return true
11104 }
11105
11106
11107
11108 for {
11109 i := auxIntToInt32(v.AuxInt)
11110 s := auxToSym(v.Aux)
11111 p := v_0
11112 x1 := v_1
11113 if x1.Op != OpAMD64MOVBload {
11114 break
11115 }
11116 j := auxIntToInt32(x1.AuxInt)
11117 s2 := auxToSym(x1.Aux)
11118 mem := x1.Args[1]
11119 p2 := x1.Args[0]
11120 mem2 := v_2
11121 if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
11122 break
11123 }
11124 _ = mem2.Args[2]
11125 if p != mem2.Args[0] {
11126 break
11127 }
11128 x2 := mem2.Args[1]
11129 if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
11130 break
11131 }
11132 _ = x2.Args[1]
11133 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
11134 break
11135 }
11136 v.reset(OpAMD64MOVWstore)
11137 v.AuxInt = int32ToAuxInt(i - 1)
11138 v.Aux = symToAux(s)
11139 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
11140 v0.AuxInt = int32ToAuxInt(j - 1)
11141 v0.Aux = symToAux(s2)
11142 v0.AddArg2(p2, mem)
11143 v.AddArg3(p, v0, mem)
11144 return true
11145 }
11146 return false
11147 }
// rewriteValueAMD64_OpAMD64MOVBstoreconst applies strength-reduction rules to a
// MOVBstoreconst value: folding address arithmetic into the aux offset and
// merging two adjacent constant byte stores into one MOVWstoreconst.
// Rules are tried in order; the first one that matches rewrites v and returns true.
func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// Only fold the constant add if the combined offset still fits the aux encoding.
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())] {s} p mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		// The inner store must be at the immediately lower address and have no
		// other uses, so it can be subsumed into a single 16-bit store.
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		// Little-endian combine: `a` is the low byte, `c` the high byte.
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)
	// result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off())] {s} p mem)
	// Mirror of the previous rule with the store order reversed (outer store is
	// the low byte this time); the combined value is the same.
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVBstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQSX simplifies sign-extension of a 32-bit value
// to 64 bits: folding the extension into a load, narrowing an AND whose mask
// leaves the sign bit clear, and collapsing redundant double extensions.
// Rules are tried in order; the first one that matches rewrites v and returns true.
func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Build the replacement in the load's block so it stays next to its memory arg.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQSX (ANDLconst [c] x))
	// cond: uint32(c) & 0x80000000 == 0
	// result: (ANDLconst [c & 0x7fffffff] x)
	// If the mask clears bit 31 the result is already non-negative, so the
	// sign extension is a no-op and can be dropped.
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(uint32(c)&0x80000000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVLQSX x))
	// result: (MOVLQSX x)
	for {
		if v_0.Op != OpAMD64MOVLQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQSXload simplifies a sign-extending 32-bit load:
// forwarding a just-stored value instead of reloading it, and folding a LEAQ
// address computation into the load's offset/symbol.
// Rules are tried in order; the first one that matches rewrites v and returns true.
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQSX x)
	// Store-to-load forwarding: reading back the exact location just written
	// yields the stored value, sign-extended.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLQSXload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLQZX simplifies zero-extension of a 32-bit value
// to 64 bits: folding the extension into a load (a plain MOVLload already
// zeroes the upper 32 bits), eliding it when the input provably has a zero
// upper half, and collapsing redundant double extensions.
// Rules are tried in order; the first one that matches rewrites v and returns true.
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// Rebuild the load in its own block; its 32-bit write already zero-extends.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLQZX x)
	// cond: zeroUpper32Bits(x, 3)
	// result: x
	// The extension is a no-op when x's upper 32 bits are already known zero.
	for {
		x := v_0
		if !(zeroUpper32Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVLQZX (ANDLconst [c] x))
	// result: (ANDLconst [c] x)
	// A 32-bit AND already clears the upper half, so drop the extension.
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVLQZX x))
	// result: (MOVLQZX x)
	for {
		if v_0.Op != OpAMD64MOVLQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLatomicload folds address arithmetic
// (ADDQconst, LEAQ) into the offset/symbol of a 32-bit atomic load.
// Rules are tried in order; the first one that matches rewrites v and returns true.
func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		// The combined displacement must fit in a signed 32-bit addressing offset.
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLf2i rewrites a 32-bit float->int register move
// of an argument: instead of moving between register classes, reinterpret the
// Arg itself with the integer type (same stack slot, same size).
func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		// Args live in the entry block; create the retyped Arg there.
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLi2f rewrites a 32-bit int->float register move
// of an argument: instead of moving between register classes, reinterpret the
// Arg itself with the float type (same stack slot, same size).
func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVLi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		// Args live in the entry block; create the retyped Arg there.
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVLload simplifies a 32-bit load: store-to-load
// forwarding, folding address arithmetic (ADDQconst, LEAQ) into the aux
// offset/symbol, reinterpreting a just-stored float, and constant-folding
// loads from read-only symbol data.
// Rules are tried in order; the first one that matches rewrites v and returns true.
func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVLQZX x)
	// Store-to-load forwarding: reading back the exact location just written
	// yields the stored value, zero-extended.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVLQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVLload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVLload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
	// result: (MOVLf2i val)
	// An integer reload of a just-stored float is a bit-for-bit reinterpretation.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLf2i)
		v.AddArg(val)
		return true
	}
	// match: (MOVLload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	// Loads from read-only data can be resolved to a constant at compile time.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
11704 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
11705 v_2 := v.Args[2]
11706 v_1 := v.Args[1]
11707 v_0 := v.Args[0]
11708 b := v.Block
11709 typ := &b.Func.Config.Types
11710
11711
11712 for {
11713 off := auxIntToInt32(v.AuxInt)
11714 sym := auxToSym(v.Aux)
11715 ptr := v_0
11716 if v_1.Op != OpAMD64MOVLQSX {
11717 break
11718 }
11719 x := v_1.Args[0]
11720 mem := v_2
11721 v.reset(OpAMD64MOVLstore)
11722 v.AuxInt = int32ToAuxInt(off)
11723 v.Aux = symToAux(sym)
11724 v.AddArg3(ptr, x, mem)
11725 return true
11726 }
11727
11728
11729 for {
11730 off := auxIntToInt32(v.AuxInt)
11731 sym := auxToSym(v.Aux)
11732 ptr := v_0
11733 if v_1.Op != OpAMD64MOVLQZX {
11734 break
11735 }
11736 x := v_1.Args[0]
11737 mem := v_2
11738 v.reset(OpAMD64MOVLstore)
11739 v.AuxInt = int32ToAuxInt(off)
11740 v.Aux = symToAux(sym)
11741 v.AddArg3(ptr, x, mem)
11742 return true
11743 }
11744
11745
11746
11747 for {
11748 off1 := auxIntToInt32(v.AuxInt)
11749 sym := auxToSym(v.Aux)
11750 if v_0.Op != OpAMD64ADDQconst {
11751 break
11752 }
11753 off2 := auxIntToInt32(v_0.AuxInt)
11754 ptr := v_0.Args[0]
11755 val := v_1
11756 mem := v_2
11757 if !(is32Bit(int64(off1) + int64(off2))) {
11758 break
11759 }
11760 v.reset(OpAMD64MOVLstore)
11761 v.AuxInt = int32ToAuxInt(off1 + off2)
11762 v.Aux = symToAux(sym)
11763 v.AddArg3(ptr, val, mem)
11764 return true
11765 }
11766
11767
11768 for {
11769 off := auxIntToInt32(v.AuxInt)
11770 sym := auxToSym(v.Aux)
11771 ptr := v_0
11772 if v_1.Op != OpAMD64MOVLconst {
11773 break
11774 }
11775 c := auxIntToInt32(v_1.AuxInt)
11776 mem := v_2
11777 v.reset(OpAMD64MOVLstoreconst)
11778 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11779 v.Aux = symToAux(sym)
11780 v.AddArg2(ptr, mem)
11781 return true
11782 }
11783
11784
11785 for {
11786 off := auxIntToInt32(v.AuxInt)
11787 sym := auxToSym(v.Aux)
11788 ptr := v_0
11789 if v_1.Op != OpAMD64MOVQconst {
11790 break
11791 }
11792 c := auxIntToInt64(v_1.AuxInt)
11793 mem := v_2
11794 v.reset(OpAMD64MOVLstoreconst)
11795 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11796 v.Aux = symToAux(sym)
11797 v.AddArg2(ptr, mem)
11798 return true
11799 }
11800
11801
11802
11803 for {
11804 off1 := auxIntToInt32(v.AuxInt)
11805 sym1 := auxToSym(v.Aux)
11806 if v_0.Op != OpAMD64LEAQ {
11807 break
11808 }
11809 off2 := auxIntToInt32(v_0.AuxInt)
11810 sym2 := auxToSym(v_0.Aux)
11811 base := v_0.Args[0]
11812 val := v_1
11813 mem := v_2
11814 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11815 break
11816 }
11817 v.reset(OpAMD64MOVLstore)
11818 v.AuxInt = int32ToAuxInt(off1 + off2)
11819 v.Aux = symToAux(mergeSym(sym1, sym2))
11820 v.AddArg3(base, val, mem)
11821 return true
11822 }
11823
11824
11825
11826 for {
11827 i := auxIntToInt32(v.AuxInt)
11828 s := auxToSym(v.Aux)
11829 p := v_0
11830 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
11831 break
11832 }
11833 w := v_1.Args[0]
11834 x := v_2
11835 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
11836 break
11837 }
11838 mem := x.Args[2]
11839 if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
11840 break
11841 }
11842 v.reset(OpAMD64MOVQstore)
11843 v.AuxInt = int32ToAuxInt(i - 4)
11844 v.Aux = symToAux(s)
11845 v.AddArg3(p, w, mem)
11846 return true
11847 }
11848
11849
11850
11851 for {
11852 i := auxIntToInt32(v.AuxInt)
11853 s := auxToSym(v.Aux)
11854 p := v_0
11855 if v_1.Op != OpAMD64SHRQconst {
11856 break
11857 }
11858 j := auxIntToInt8(v_1.AuxInt)
11859 w := v_1.Args[0]
11860 x := v_2
11861 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
11862 break
11863 }
11864 mem := x.Args[2]
11865 if p != x.Args[0] {
11866 break
11867 }
11868 w0 := x.Args[1]
11869 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
11870 break
11871 }
11872 v.reset(OpAMD64MOVQstore)
11873 v.AuxInt = int32ToAuxInt(i - 4)
11874 v.Aux = symToAux(s)
11875 v.AddArg3(p, w0, mem)
11876 return true
11877 }
11878
11879
11880
11881 for {
11882 i := auxIntToInt32(v.AuxInt)
11883 s := auxToSym(v.Aux)
11884 p1 := v_0
11885 if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
11886 break
11887 }
11888 w := v_1.Args[0]
11889 x := v_2
11890 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11891 break
11892 }
11893 mem := x.Args[2]
11894 p0 := x.Args[0]
11895 if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
11896 break
11897 }
11898 v.reset(OpAMD64MOVQstore)
11899 v.AuxInt = int32ToAuxInt(i)
11900 v.Aux = symToAux(s)
11901 v.AddArg3(p0, w, mem)
11902 return true
11903 }
11904
11905
11906
11907 for {
11908 i := auxIntToInt32(v.AuxInt)
11909 s := auxToSym(v.Aux)
11910 p1 := v_0
11911 if v_1.Op != OpAMD64SHRQconst {
11912 break
11913 }
11914 j := auxIntToInt8(v_1.AuxInt)
11915 w := v_1.Args[0]
11916 x := v_2
11917 if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
11918 break
11919 }
11920 mem := x.Args[2]
11921 p0 := x.Args[0]
11922 w0 := x.Args[1]
11923 if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
11924 break
11925 }
11926 v.reset(OpAMD64MOVQstore)
11927 v.AuxInt = int32ToAuxInt(i)
11928 v.Aux = symToAux(s)
11929 v.AddArg3(p0, w0, mem)
11930 return true
11931 }
11932
11933
11934
11935 for {
11936 i := auxIntToInt32(v.AuxInt)
11937 s := auxToSym(v.Aux)
11938 p := v_0
11939 x1 := v_1
11940 if x1.Op != OpAMD64MOVLload {
11941 break
11942 }
11943 j := auxIntToInt32(x1.AuxInt)
11944 s2 := auxToSym(x1.Aux)
11945 mem := x1.Args[1]
11946 p2 := x1.Args[0]
11947 mem2 := v_2
11948 if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
11949 break
11950 }
11951 _ = mem2.Args[2]
11952 if p != mem2.Args[0] {
11953 break
11954 }
11955 x2 := mem2.Args[1]
11956 if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
11957 break
11958 }
11959 _ = x2.Args[1]
11960 if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
11961 break
11962 }
11963 v.reset(OpAMD64MOVQstore)
11964 v.AuxInt = int32ToAuxInt(i - 4)
11965 v.Aux = symToAux(s)
11966 v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
11967 v0.AuxInt = int32ToAuxInt(j - 4)
11968 v0.Aux = symToAux(s2)
11969 v0.AddArg2(p2, mem)
11970 v.AddArg3(p, v0, mem)
11971 return true
11972 }
11973
11974
11975
11976 for {
11977 off := auxIntToInt32(v.AuxInt)
11978 sym := auxToSym(v.Aux)
11979 ptr := v_0
11980 y := v_1
11981 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11982 break
11983 }
11984 mem := y.Args[2]
11985 x := y.Args[0]
11986 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11987 break
11988 }
11989 v.reset(OpAMD64ADDLmodify)
11990 v.AuxInt = int32ToAuxInt(off)
11991 v.Aux = symToAux(sym)
11992 v.AddArg3(ptr, x, mem)
11993 return true
11994 }
11995
11996
11997
11998 for {
11999 off := auxIntToInt32(v.AuxInt)
12000 sym := auxToSym(v.Aux)
12001 ptr := v_0
12002 y := v_1
12003 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12004 break
12005 }
12006 mem := y.Args[2]
12007 x := y.Args[0]
12008 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12009 break
12010 }
12011 v.reset(OpAMD64ANDLmodify)
12012 v.AuxInt = int32ToAuxInt(off)
12013 v.Aux = symToAux(sym)
12014 v.AddArg3(ptr, x, mem)
12015 return true
12016 }
12017
12018
12019
12020 for {
12021 off := auxIntToInt32(v.AuxInt)
12022 sym := auxToSym(v.Aux)
12023 ptr := v_0
12024 y := v_1
12025 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12026 break
12027 }
12028 mem := y.Args[2]
12029 x := y.Args[0]
12030 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12031 break
12032 }
12033 v.reset(OpAMD64ORLmodify)
12034 v.AuxInt = int32ToAuxInt(off)
12035 v.Aux = symToAux(sym)
12036 v.AddArg3(ptr, x, mem)
12037 return true
12038 }
12039
12040
12041
12042 for {
12043 off := auxIntToInt32(v.AuxInt)
12044 sym := auxToSym(v.Aux)
12045 ptr := v_0
12046 y := v_1
12047 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12048 break
12049 }
12050 mem := y.Args[2]
12051 x := y.Args[0]
12052 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12053 break
12054 }
12055 v.reset(OpAMD64XORLmodify)
12056 v.AuxInt = int32ToAuxInt(off)
12057 v.Aux = symToAux(sym)
12058 v.AddArg3(ptr, x, mem)
12059 return true
12060 }
12061
12062
12063
12064 for {
12065 off := auxIntToInt32(v.AuxInt)
12066 sym := auxToSym(v.Aux)
12067 ptr := v_0
12068 y := v_1
12069 if y.Op != OpAMD64ADDL {
12070 break
12071 }
12072 _ = y.Args[1]
12073 y_0 := y.Args[0]
12074 y_1 := y.Args[1]
12075 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12076 l := y_0
12077 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12078 continue
12079 }
12080 mem := l.Args[1]
12081 if ptr != l.Args[0] {
12082 continue
12083 }
12084 x := y_1
12085 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12086 continue
12087 }
12088 v.reset(OpAMD64ADDLmodify)
12089 v.AuxInt = int32ToAuxInt(off)
12090 v.Aux = symToAux(sym)
12091 v.AddArg3(ptr, x, mem)
12092 return true
12093 }
12094 break
12095 }
12096
12097
12098
12099 for {
12100 off := auxIntToInt32(v.AuxInt)
12101 sym := auxToSym(v.Aux)
12102 ptr := v_0
12103 y := v_1
12104 if y.Op != OpAMD64SUBL {
12105 break
12106 }
12107 x := y.Args[1]
12108 l := y.Args[0]
12109 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12110 break
12111 }
12112 mem := l.Args[1]
12113 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12114 break
12115 }
12116 v.reset(OpAMD64SUBLmodify)
12117 v.AuxInt = int32ToAuxInt(off)
12118 v.Aux = symToAux(sym)
12119 v.AddArg3(ptr, x, mem)
12120 return true
12121 }
12122
12123
12124
12125 for {
12126 off := auxIntToInt32(v.AuxInt)
12127 sym := auxToSym(v.Aux)
12128 ptr := v_0
12129 y := v_1
12130 if y.Op != OpAMD64ANDL {
12131 break
12132 }
12133 _ = y.Args[1]
12134 y_0 := y.Args[0]
12135 y_1 := y.Args[1]
12136 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12137 l := y_0
12138 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12139 continue
12140 }
12141 mem := l.Args[1]
12142 if ptr != l.Args[0] {
12143 continue
12144 }
12145 x := y_1
12146 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12147 continue
12148 }
12149 v.reset(OpAMD64ANDLmodify)
12150 v.AuxInt = int32ToAuxInt(off)
12151 v.Aux = symToAux(sym)
12152 v.AddArg3(ptr, x, mem)
12153 return true
12154 }
12155 break
12156 }
12157
12158
12159
12160 for {
12161 off := auxIntToInt32(v.AuxInt)
12162 sym := auxToSym(v.Aux)
12163 ptr := v_0
12164 y := v_1
12165 if y.Op != OpAMD64ORL {
12166 break
12167 }
12168 _ = y.Args[1]
12169 y_0 := y.Args[0]
12170 y_1 := y.Args[1]
12171 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12172 l := y_0
12173 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12174 continue
12175 }
12176 mem := l.Args[1]
12177 if ptr != l.Args[0] {
12178 continue
12179 }
12180 x := y_1
12181 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12182 continue
12183 }
12184 v.reset(OpAMD64ORLmodify)
12185 v.AuxInt = int32ToAuxInt(off)
12186 v.Aux = symToAux(sym)
12187 v.AddArg3(ptr, x, mem)
12188 return true
12189 }
12190 break
12191 }
12192
12193
12194
12195 for {
12196 off := auxIntToInt32(v.AuxInt)
12197 sym := auxToSym(v.Aux)
12198 ptr := v_0
12199 y := v_1
12200 if y.Op != OpAMD64XORL {
12201 break
12202 }
12203 _ = y.Args[1]
12204 y_0 := y.Args[0]
12205 y_1 := y.Args[1]
12206 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12207 l := y_0
12208 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12209 continue
12210 }
12211 mem := l.Args[1]
12212 if ptr != l.Args[0] {
12213 continue
12214 }
12215 x := y_1
12216 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12217 continue
12218 }
12219 v.reset(OpAMD64XORLmodify)
12220 v.AuxInt = int32ToAuxInt(off)
12221 v.Aux = symToAux(sym)
12222 v.AddArg3(ptr, x, mem)
12223 return true
12224 }
12225 break
12226 }
12227
12228
12229
12230 for {
12231 off := auxIntToInt32(v.AuxInt)
12232 sym := auxToSym(v.Aux)
12233 ptr := v_0
12234 a := v_1
12235 if a.Op != OpAMD64ADDLconst {
12236 break
12237 }
12238 c := auxIntToInt32(a.AuxInt)
12239 l := a.Args[0]
12240 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12241 break
12242 }
12243 mem := l.Args[1]
12244 ptr2 := l.Args[0]
12245 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12246 break
12247 }
12248 v.reset(OpAMD64ADDLconstmodify)
12249 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12250 v.Aux = symToAux(sym)
12251 v.AddArg2(ptr, mem)
12252 return true
12253 }
12254
12255
12256
12257 for {
12258 off := auxIntToInt32(v.AuxInt)
12259 sym := auxToSym(v.Aux)
12260 ptr := v_0
12261 a := v_1
12262 if a.Op != OpAMD64ANDLconst {
12263 break
12264 }
12265 c := auxIntToInt32(a.AuxInt)
12266 l := a.Args[0]
12267 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12268 break
12269 }
12270 mem := l.Args[1]
12271 ptr2 := l.Args[0]
12272 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12273 break
12274 }
12275 v.reset(OpAMD64ANDLconstmodify)
12276 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12277 v.Aux = symToAux(sym)
12278 v.AddArg2(ptr, mem)
12279 return true
12280 }
12281
12282
12283
12284 for {
12285 off := auxIntToInt32(v.AuxInt)
12286 sym := auxToSym(v.Aux)
12287 ptr := v_0
12288 a := v_1
12289 if a.Op != OpAMD64ORLconst {
12290 break
12291 }
12292 c := auxIntToInt32(a.AuxInt)
12293 l := a.Args[0]
12294 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12295 break
12296 }
12297 mem := l.Args[1]
12298 ptr2 := l.Args[0]
12299 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12300 break
12301 }
12302 v.reset(OpAMD64ORLconstmodify)
12303 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12304 v.Aux = symToAux(sym)
12305 v.AddArg2(ptr, mem)
12306 return true
12307 }
12308
12309
12310
12311 for {
12312 off := auxIntToInt32(v.AuxInt)
12313 sym := auxToSym(v.Aux)
12314 ptr := v_0
12315 a := v_1
12316 if a.Op != OpAMD64XORLconst {
12317 break
12318 }
12319 c := auxIntToInt32(a.AuxInt)
12320 l := a.Args[0]
12321 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12322 break
12323 }
12324 mem := l.Args[1]
12325 ptr2 := l.Args[0]
12326 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12327 break
12328 }
12329 v.reset(OpAMD64XORLconstmodify)
12330 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12331 v.Aux = symToAux(sym)
12332 v.AddArg2(ptr, mem)
12333 return true
12334 }
12335
12336
12337 for {
12338 off := auxIntToInt32(v.AuxInt)
12339 sym := auxToSym(v.Aux)
12340 ptr := v_0
12341 if v_1.Op != OpAMD64MOVLf2i {
12342 break
12343 }
12344 val := v_1.Args[0]
12345 mem := v_2
12346 v.reset(OpAMD64MOVSSstore)
12347 v.AuxInt = int32ToAuxInt(off)
12348 v.Aux = symToAux(sym)
12349 v.AddArg3(ptr, val, mem)
12350 return true
12351 }
12352
12353
12354
12355 for {
12356 i := auxIntToInt32(v.AuxInt)
12357 s := auxToSym(v.Aux)
12358 p := v_0
12359 x := v_1
12360 if x.Op != OpAMD64BSWAPL {
12361 break
12362 }
12363 w := x.Args[0]
12364 mem := v_2
12365 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12366 break
12367 }
12368 v.reset(OpAMD64MOVBELstore)
12369 v.AuxInt = int32ToAuxInt(i)
12370 v.Aux = symToAux(s)
12371 v.AddArg3(p, w, mem)
12372 return true
12373 }
12374 return false
12375 }
// rewriteValueAMD64_OpAMD64MOVLstoreconst applies rewrite rules to a
// MOVLstoreconst value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// Combine two adjacent 32-bit constant stores into one 64-bit store
	// (this value stores the high word; x stores the low word).
	// match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p, v0, mem)
		return true
	}
	// Same combine with the store order reversed
	// (this value stores the low word; x stores the high word).
	// match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)
	// result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVLstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(a.Off())
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
		v.AddArg3(p, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOload applies rewrite rules to a MOVOload
// value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// Fold a constant pointer increment into the load offset.
	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// Fold a LEAQ address computation into the load.
	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOstore applies rewrite rules to a MOVOstore
// value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// Fold a constant pointer increment into the store offset.
	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// Fold a LEAQ address computation into the store.
	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// A 16-byte copy from a read-only symbol becomes two 8-byte constant
	// stores, reading the source bytes at compile time with read64.
	// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
	// cond: symIsRO(srcSym)
	// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))] (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))] mem)))
	for {
		dstOff := auxIntToInt32(v.AuxInt)
		dstSym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVOload {
			break
		}
		srcOff := auxIntToInt32(v_1.AuxInt)
		srcSym := auxToSym(v_1.Aux)
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpSB {
			break
		}
		mem := v_2
		if !(symIsRO(srcSym)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(dstOff + 8)
		v.Aux = symToAux(dstSym)
		v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
		v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
		v1.AuxInt = int32ToAuxInt(dstOff)
		v1.Aux = symToAux(dstSym)
		v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
		v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
		v1.AddArg3(ptr, v2, mem)
		v.AddArg3(ptr, v0, v1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVOstoreconst applies rewrite rules to a
// MOVOstoreconst value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQatomicload applies rewrite rules to a
// MOVQatomicload value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQatomicload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQf2i applies rewrite rules to a MOVQf2i
// value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// A bit-for-bit reinterpretation of an argument can just reload the
	// argument at the new type; the Arg is materialized in the entry block.
	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQi2f applies rewrite rules to a MOVQi2f
// value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// A bit-for-bit reinterpretation of an argument can just reload the
	// argument at the new type; the Arg is materialized in the entry block.
	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})
	for {
		t := v.Type
		if v_0.Op != OpArg {
			break
		}
		u := v_0.Type
		off := auxIntToInt32(v_0.AuxInt)
		sym := auxToSym(v_0.Aux)
		if !(t.Size() == u.Size()) {
			break
		}
		b = b.Func.Entry
		v0 := b.NewValue0(v.Pos, OpArg, t)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQload applies rewrite rules to a MOVQload
// value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// Store-to-load forwarding: a load from the address just stored to
	// yields the stored value directly.
	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1, sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// An integer load of a just-stored float value becomes a register
	// move across register files instead of a round trip through memory.
	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// result: (MOVQf2i val)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQf2i)
		v.AddArg(val)
		return true
	}
	// A load from a read-only symbol is resolved at compile time.
	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstore applies rewrite rules to a MOVQstore
// value; it reports whether any rule fired. Besides address folding, it
// fuses load/op/store sequences into read-modify-write instructions
// (the *Qmodify and *Qconstmodify ops).
func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// Store of a constant that fits becomes a store-constant instruction.
	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (MOVQstoreconst [makeValAndOff(int32(c), off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ADDQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ANDQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
			break
		}
		mem := y.Args[2]
		x := y.Args[0]
		if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// ADDQ is commutative: the inner loop tries both operand orders.
	// match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (ADDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ADDQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ADDQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// SUBQ is not commutative: the load must be the left operand.
	// match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (SUBQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64SUBQ {
			break
		}
		x := y.Args[1]
		l := y.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (ANDQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ANDQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ANDQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (ORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64ORQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64ORQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
	// result: (XORQmodify [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		y := v_1
		if y.Op != OpAMD64XORQ {
			break
		}
		_ = y.Args[1]
		y_0 := y.Args[0]
		y_1 := y.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
			l := y_0
			if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
				continue
			}
			mem := l.Args[1]
			if ptr != l.Args[0] {
				continue
			}
			x := y_1
			if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
				continue
			}
			v.reset(OpAMD64XORQmodify)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(ptr, x, mem)
			return true
		}
		break
	}
	// Fuse a constant op into a read-modify-write with immediate.
	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ADDQconstmodify {sym} [makeValAndOff(int32(c), off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ADDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ANDQconstmodify {sym} [makeValAndOff(int32(c), off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ANDQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (ORQconstmodify {sym} [makeValAndOff(int32(c), off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64ORQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
	// result: (XORQconstmodify {sym} [makeValAndOff(int32(c), off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		a := v_1
		if a.Op != OpAMD64XORQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		l := a.Args[0]
		if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
			break
		}
		mem := l.Args[1]
		ptr2 := l.Args[0]
		if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// An integer store of a float value bit-pattern is done directly from
	// the floating-point register.
	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// result: (MOVSDstore [off] {sym} ptr val mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQf2i {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// Fuse a byte swap into the store (MOVBE, requires GOAMD64>=v3).
	// match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
	// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
	// result: (MOVBEQstore [i] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64BSWAPQ {
			break
		}
		w := x.Args[0]
		mem := v_2
		if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64MOVBEQstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVQstoreconst applies rewrite rules to a
// MOVQstoreconst value; it reports whether any rule fired.
func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// Combine two adjacent 8-byte zero stores into one 16-byte zero store
	// (SSE only; this value stores the high quadword).
	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [a] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && a.Off()+8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)
	// result: (MOVOstoreconst [makeValAndOff(0, a.Off())] {s} p mem)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && a.Off()+8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// Same combine with the store order reversed
	// (this value stores the low quadword).
	// match: (MOVQstoreconst [a] {s} p x:(MOVQstoreconst [c] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && a.Off()+8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)
	// result: (MOVOstoreconst [makeValAndOff(0, a.Off())] {s} p mem)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVQstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && a.Off()+8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDload rewrites MOVSDload values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
	// result: (MOVQi2f val)
	// (store-to-load forwarding: reuse the stored GP value via a reg-reg move)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVQi2f)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSDstore rewrites MOVSDstore values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSDstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
	// result: (MOVQstore [off] {sym} ptr val mem)
	// (store the GP source directly instead of moving it through an FP register)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSload rewrites MOVSSload values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
	// result: (MOVLi2f val)
	// (store-to-load forwarding: reuse the stored GP value via a reg-reg move)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
			break
		}
		val := v_1.Args[1]
		if ptr != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64MOVLi2f)
		v.AddArg(val)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVSSstore rewrites MOVSSstore values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVSSstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
	// result: (MOVLstore [off] {sym} ptr val mem)
	// (store the GP source directly instead of moving it through an FP register)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLi2f {
			break
		}
		val := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSX rewrites MOVWQSX (sign-extend word) values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		// The replacement is built in the load's block so it stays close to mem.
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQSX (ANDLconst [c] x))
	// cond: c & 0x8000 == 0
	// result: (ANDLconst [c & 0x7fff] x)
	// (sign bit of the word is known clear, so the extension is a no-op)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(c&0x8000 == 0) {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0x7fff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVWQSX x))
	// result: (MOVWQSX x)
	for {
		if v_0.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSX (MOVBQSX x))
	// result: (MOVBQSX x)
	// (the narrower sign extension already implies the wider one)
	for {
		if v_0.Op != OpAMD64MOVBQSX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQSX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQSXload rewrites MOVWQSXload values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQSX x)
	// (store-to-load forwarding: sign-extend the stored value directly)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQSX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWQSXload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWQZX rewrites MOVWQZX (zero-extend word) values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	// (MOVWload already zero-extends, so the explicit extension is dropped)
	for {
		x := v_0
		if x.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
	for {
		x := v_0
		if x.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(x.AuxInt)
		sym := auxToSym(x.Aux)
		mem := x.Args[1]
		ptr := x.Args[0]
		if !(x.Uses == 1 && clobber(x)) {
			break
		}
		b = x.Block
		v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
		v.copyOf(v0)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWQZX x)
	// cond: zeroUpper48Bits(x, 3)
	// result: x
	// (x is already known to have zero upper bits, so the extension is redundant)
	for {
		x := v_0
		if !(zeroUpper48Bits(x, 3)) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (MOVWQZX (ANDLconst [c] x))
	// result: (ANDLconst [c & 0xffff] x)
	for {
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(c & 0xffff)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVWQZX x))
	// result: (MOVWQZX x)
	for {
		if v_0.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWQZX (MOVBQZX x))
	// result: (MOVBQZX x)
	// (the narrower zero extension already implies the wider one)
	for {
		if v_0.Op != OpAMD64MOVBQZX {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64MOVBQZX)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWload rewrites MOVWload values.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: (MOVWQZX x)
	// (store-to-load forwarding: zero-extend the stored value directly)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWstore {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		x := v_1.Args[1]
		ptr2 := v_1.Args[0]
		if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
			break
		}
		v.reset(OpAMD64MOVWQZX)
		v.AddArg(x)
		return true
	}
	// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWload [off1+off2] {sym} ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	// match: (MOVWload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
	// (constant-fold a load from a read-only symbol at compile time)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpSB || !(symIsRO(sym)) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstore rewrites MOVWstore values, including
// rules that merge adjacent word stores into a single long store.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	// (only the low 16 bits are stored, so the extension is irrelevant)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQSX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
	// result: (MOVWstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVWQZX {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MOVWstore [off1+off2] {sym} ptr val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
	// (fold a constant source into a store-immediate; only int16(c) survives)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	// (two adjacent word stores of w's halves become one long store)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	// (adjacent word stores of consecutive 16-bit slices of w merge into one)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVLstore [i-2] {s} p w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		if p != x.Args[0] {
			break
		}
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v.AddArg3(p, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w mem)
	// (same merge, but adjacency is proven via the pointers, not the offsets)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRQconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
			break
		}
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p1 (SHRQconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRQconst [j-16] w) mem))
	// cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
	// result: (MOVLstore [i] {s} p0 w0 mem)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p1 := v_0
		if v_1.Op != OpAMD64SHRQconst {
			break
		}
		j := auxIntToInt8(v_1.AuxInt)
		w := v_1.Args[0]
		x := v_2
		if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[2]
		p0 := x.Args[0]
		w0 := x.Args[1]
		if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i)
		v.Aux = symToAux(s)
		v.AddArg3(p0, w0, mem)
		return true
	}
	// match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
	// cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
	// result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
	// (merge two adjacent word copies into a single long load+store pair)
	for {
		i := auxIntToInt32(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x1 := v_1
		if x1.Op != OpAMD64MOVWload {
			break
		}
		j := auxIntToInt32(x1.AuxInt)
		s2 := auxToSym(x1.Aux)
		mem := x1.Args[1]
		p2 := x1.Args[0]
		mem2 := v_2
		if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
			break
		}
		_ = mem2.Args[2]
		if p != mem2.Args[0] {
			break
		}
		x2 := mem2.Args[1]
		if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
			break
		}
		_ = x2.Args[1]
		if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(i - 2)
		v.Aux = symToAux(s)
		v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(j - 2)
		v0.Aux = symToAux(s2)
		v0.AddArg2(p2, mem)
		v.AddArg3(p, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64MOVWstoreconst rewrites MOVWstoreconst values,
// including merging two adjacent constant word stores into one long store.
// It returns true if v was rewritten by one of the rules below.
func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(s)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
	// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
	for {
		sc := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		ptr := v_0.Args[0]
		mem := v_1
		if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
			break
		}
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
	// cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
	// (merge two adjacent constant word stores; a is the lower-addressed one)
	for {
		c := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		a := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	// match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
	// cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
	// result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
	// (same merge with the stores encountered in the opposite order)
	for {
		a := auxIntToValAndOff(v.AuxInt)
		s := auxToSym(v.Aux)
		p := v_0
		x := v_1
		if x.Op != OpAMD64MOVWstoreconst {
			break
		}
		c := auxIntToValAndOff(x.AuxInt)
		if auxToSym(x.Aux) != s {
			break
		}
		mem := x.Args[1]
		if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
			break
		}
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
		v.Aux = symToAux(s)
		v.AddArg2(p, mem)
		return true
	}
	return false
}
14546 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
14547 v_1 := v.Args[1]
14548 v_0 := v.Args[0]
14549
14550
14551 for {
14552 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14553 x := v_0
14554 if v_1.Op != OpAMD64MOVLconst {
14555 continue
14556 }
14557 c := auxIntToInt32(v_1.AuxInt)
14558 v.reset(OpAMD64MULLconst)
14559 v.AuxInt = int32ToAuxInt(c)
14560 v.AddArg(x)
14561 return true
14562 }
14563 break
14564 }
14565 return false
14566 }
14567 func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
14568 v_0 := v.Args[0]
14569 b := v.Block
14570
14571
14572 for {
14573 c := auxIntToInt32(v.AuxInt)
14574 if v_0.Op != OpAMD64MULLconst {
14575 break
14576 }
14577 d := auxIntToInt32(v_0.AuxInt)
14578 x := v_0.Args[0]
14579 v.reset(OpAMD64MULLconst)
14580 v.AuxInt = int32ToAuxInt(c * d)
14581 v.AddArg(x)
14582 return true
14583 }
14584
14585
14586 for {
14587 if auxIntToInt32(v.AuxInt) != -9 {
14588 break
14589 }
14590 x := v_0
14591 v.reset(OpAMD64NEGL)
14592 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
14593 v0.AddArg2(x, x)
14594 v.AddArg(v0)
14595 return true
14596 }
14597
14598
14599 for {
14600 if auxIntToInt32(v.AuxInt) != -5 {
14601 break
14602 }
14603 x := v_0
14604 v.reset(OpAMD64NEGL)
14605 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
14606 v0.AddArg2(x, x)
14607 v.AddArg(v0)
14608 return true
14609 }
14610
14611
14612 for {
14613 if auxIntToInt32(v.AuxInt) != -3 {
14614 break
14615 }
14616 x := v_0
14617 v.reset(OpAMD64NEGL)
14618 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
14619 v0.AddArg2(x, x)
14620 v.AddArg(v0)
14621 return true
14622 }
14623
14624
14625 for {
14626 if auxIntToInt32(v.AuxInt) != -1 {
14627 break
14628 }
14629 x := v_0
14630 v.reset(OpAMD64NEGL)
14631 v.AddArg(x)
14632 return true
14633 }
14634
14635
14636 for {
14637 if auxIntToInt32(v.AuxInt) != 0 {
14638 break
14639 }
14640 v.reset(OpAMD64MOVLconst)
14641 v.AuxInt = int32ToAuxInt(0)
14642 return true
14643 }
14644
14645
14646 for {
14647 if auxIntToInt32(v.AuxInt) != 1 {
14648 break
14649 }
14650 x := v_0
14651 v.copyOf(x)
14652 return true
14653 }
14654
14655
14656 for {
14657 if auxIntToInt32(v.AuxInt) != 3 {
14658 break
14659 }
14660 x := v_0
14661 v.reset(OpAMD64LEAL2)
14662 v.AddArg2(x, x)
14663 return true
14664 }
14665
14666
14667 for {
14668 if auxIntToInt32(v.AuxInt) != 5 {
14669 break
14670 }
14671 x := v_0
14672 v.reset(OpAMD64LEAL4)
14673 v.AddArg2(x, x)
14674 return true
14675 }
14676
14677
14678 for {
14679 if auxIntToInt32(v.AuxInt) != 7 {
14680 break
14681 }
14682 x := v_0
14683 v.reset(OpAMD64LEAL2)
14684 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
14685 v0.AddArg2(x, x)
14686 v.AddArg2(x, v0)
14687 return true
14688 }
14689
14690
14691 for {
14692 if auxIntToInt32(v.AuxInt) != 9 {
14693 break
14694 }
14695 x := v_0
14696 v.reset(OpAMD64LEAL8)
14697 v.AddArg2(x, x)
14698 return true
14699 }
14700
14701
14702 for {
14703 if auxIntToInt32(v.AuxInt) != 11 {
14704 break
14705 }
14706 x := v_0
14707 v.reset(OpAMD64LEAL2)
14708 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
14709 v0.AddArg2(x, x)
14710 v.AddArg2(x, v0)
14711 return true
14712 }
14713
14714
14715 for {
14716 if auxIntToInt32(v.AuxInt) != 13 {
14717 break
14718 }
14719 x := v_0
14720 v.reset(OpAMD64LEAL4)
14721 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
14722 v0.AddArg2(x, x)
14723 v.AddArg2(x, v0)
14724 return true
14725 }
14726
14727
14728 for {
14729 if auxIntToInt32(v.AuxInt) != 19 {
14730 break
14731 }
14732 x := v_0
14733 v.reset(OpAMD64LEAL2)
14734 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
14735 v0.AddArg2(x, x)
14736 v.AddArg2(x, v0)
14737 return true
14738 }
14739
14740
14741 for {
14742 if auxIntToInt32(v.AuxInt) != 21 {
14743 break
14744 }
14745 x := v_0
14746 v.reset(OpAMD64LEAL4)
14747 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
14748 v0.AddArg2(x, x)
14749 v.AddArg2(x, v0)
14750 return true
14751 }
14752
14753
14754 for {
14755 if auxIntToInt32(v.AuxInt) != 25 {
14756 break
14757 }
14758 x := v_0
14759 v.reset(OpAMD64LEAL8)
14760 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
14761 v0.AddArg2(x, x)
14762 v.AddArg2(x, v0)
14763 return true
14764 }
14765
14766
14767 for {
14768 if auxIntToInt32(v.AuxInt) != 27 {
14769 break
14770 }
14771 x := v_0
14772 v.reset(OpAMD64LEAL8)
14773 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
14774 v0.AddArg2(x, x)
14775 v.AddArg2(v0, v0)
14776 return true
14777 }
14778
14779
14780 for {
14781 if auxIntToInt32(v.AuxInt) != 37 {
14782 break
14783 }
14784 x := v_0
14785 v.reset(OpAMD64LEAL4)
14786 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
14787 v0.AddArg2(x, x)
14788 v.AddArg2(x, v0)
14789 return true
14790 }
14791
14792
14793 for {
14794 if auxIntToInt32(v.AuxInt) != 41 {
14795 break
14796 }
14797 x := v_0
14798 v.reset(OpAMD64LEAL8)
14799 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
14800 v0.AddArg2(x, x)
14801 v.AddArg2(x, v0)
14802 return true
14803 }
14804
14805
14806 for {
14807 if auxIntToInt32(v.AuxInt) != 45 {
14808 break
14809 }
14810 x := v_0
14811 v.reset(OpAMD64LEAL8)
14812 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
14813 v0.AddArg2(x, x)
14814 v.AddArg2(v0, v0)
14815 return true
14816 }
14817
14818
14819 for {
14820 if auxIntToInt32(v.AuxInt) != 73 {
14821 break
14822 }
14823 x := v_0
14824 v.reset(OpAMD64LEAL8)
14825 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
14826 v0.AddArg2(x, x)
14827 v.AddArg2(x, v0)
14828 return true
14829 }
14830
14831
14832 for {
14833 if auxIntToInt32(v.AuxInt) != 81 {
14834 break
14835 }
14836 x := v_0
14837 v.reset(OpAMD64LEAL8)
14838 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
14839 v0.AddArg2(x, x)
14840 v.AddArg2(v0, v0)
14841 return true
14842 }
14843
14844
14845
14846 for {
14847 c := auxIntToInt32(v.AuxInt)
14848 x := v_0
14849 if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
14850 break
14851 }
14852 v.reset(OpAMD64SUBL)
14853 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
14854 v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
14855 v0.AddArg(x)
14856 v.AddArg2(v0, x)
14857 return true
14858 }
14859
14860
14861
14862 for {
14863 c := auxIntToInt32(v.AuxInt)
14864 x := v_0
14865 if !(isPowerOfTwo32(c-1) && c >= 17) {
14866 break
14867 }
14868 v.reset(OpAMD64LEAL1)
14869 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
14870 v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
14871 v0.AddArg(x)
14872 v.AddArg2(v0, x)
14873 return true
14874 }
14875
14876
14877
14878 for {
14879 c := auxIntToInt32(v.AuxInt)
14880 x := v_0
14881 if !(isPowerOfTwo32(c-2) && c >= 34) {
14882 break
14883 }
14884 v.reset(OpAMD64LEAL2)
14885 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
14886 v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
14887 v0.AddArg(x)
14888 v.AddArg2(v0, x)
14889 return true
14890 }
14891
14892
14893
14894 for {
14895 c := auxIntToInt32(v.AuxInt)
14896 x := v_0
14897 if !(isPowerOfTwo32(c-4) && c >= 68) {
14898 break
14899 }
14900 v.reset(OpAMD64LEAL4)
14901 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
14902 v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
14903 v0.AddArg(x)
14904 v.AddArg2(v0, x)
14905 return true
14906 }
14907
14908
14909
14910 for {
14911 c := auxIntToInt32(v.AuxInt)
14912 x := v_0
14913 if !(isPowerOfTwo32(c-8) && c >= 136) {
14914 break
14915 }
14916 v.reset(OpAMD64LEAL8)
14917 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
14918 v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
14919 v0.AddArg(x)
14920 v.AddArg2(v0, x)
14921 return true
14922 }
14923
14924
14925
14926 for {
14927 c := auxIntToInt32(v.AuxInt)
14928 x := v_0
14929 if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
14930 break
14931 }
14932 v.reset(OpAMD64SHLLconst)
14933 v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
14934 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
14935 v0.AddArg2(x, x)
14936 v.AddArg(v0)
14937 return true
14938 }
14939
14940
14941
14942 for {
14943 c := auxIntToInt32(v.AuxInt)
14944 x := v_0
14945 if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
14946 break
14947 }
14948 v.reset(OpAMD64SHLLconst)
14949 v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
14950 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
14951 v0.AddArg2(x, x)
14952 v.AddArg(v0)
14953 return true
14954 }
14955
14956
14957
14958 for {
14959 c := auxIntToInt32(v.AuxInt)
14960 x := v_0
14961 if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
14962 break
14963 }
14964 v.reset(OpAMD64SHLLconst)
14965 v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
14966 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
14967 v0.AddArg2(x, x)
14968 v.AddArg(v0)
14969 return true
14970 }
14971
14972
14973 for {
14974 c := auxIntToInt32(v.AuxInt)
14975 if v_0.Op != OpAMD64MOVLconst {
14976 break
14977 }
14978 d := auxIntToInt32(v_0.AuxInt)
14979 v.reset(OpAMD64MOVLconst)
14980 v.AuxInt = int32ToAuxInt(c * d)
14981 return true
14982 }
14983 return false
14984 }
14985 func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
14986 v_1 := v.Args[1]
14987 v_0 := v.Args[0]
14988
14989
14990
14991 for {
14992 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
14993 x := v_0
14994 if v_1.Op != OpAMD64MOVQconst {
14995 continue
14996 }
14997 c := auxIntToInt64(v_1.AuxInt)
14998 if !(is32Bit(c)) {
14999 continue
15000 }
15001 v.reset(OpAMD64MULQconst)
15002 v.AuxInt = int32ToAuxInt(int32(c))
15003 v.AddArg(x)
15004 return true
15005 }
15006 break
15007 }
15008 return false
15009 }
// rewriteValueAMD64_OpAMD64MULQconst rewrites (MULQconst [c] x), a 64-bit
// multiply by a 32-bit constant. Constant folding is applied where possible,
// and specially-shaped constants are strength-reduced into NEG/LEA/SHL/SUB
// combinations that are cheaper than a general multiply.
// It reports whether v was rewritten.
// NOTE(review): this file looks machine-generated (rewrite-rule style); the
// comments below reconstruct the rule each case implements.
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(int64(c)*int64(d))
	// result: (MULQconst [c*d] x)
	// Fold nested constant multiplies when the product still fits in 32 bits.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) * int64(d))) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [-9] x)
	// result: (NEGQ (LEAQ8 x x))
	// -9*x = -(x + 8*x)
	for {
		if auxIntToInt32(v.AuxInt) != -9 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-5] x)
	// result: (NEGQ (LEAQ4 x x))
	// -5*x = -(x + 4*x)
	for {
		if auxIntToInt32(v.AuxInt) != -5 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-3] x)
	// result: (NEGQ (LEAQ2 x x))
	// -3*x = -(x + 2*x)
	for {
		if auxIntToInt32(v.AuxInt) != -3 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [-1] x)
	// result: (NEGQ x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		v.reset(OpAMD64NEGQ)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MULQconst [1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULQconst [3] x)
	// result: (LEAQ2 x x)
	// 3*x = x + 2*x
	for {
		if auxIntToInt32(v.AuxInt) != 3 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [5] x)
	// result: (LEAQ4 x x)
	// 5*x = x + 4*x
	for {
		if auxIntToInt32(v.AuxInt) != 5 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [7] x)
	// result: (LEAQ2 x (LEAQ2 x x))
	// 7*x = x + 2*(3*x)
	for {
		if auxIntToInt32(v.AuxInt) != 7 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [9] x)
	// result: (LEAQ8 x x)
	// 9*x = x + 8*x
	for {
		if auxIntToInt32(v.AuxInt) != 9 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v.AddArg2(x, x)
		return true
	}
	// match: (MULQconst [11] x)
	// result: (LEAQ2 x (LEAQ4 x x))
	// 11*x = x + 2*(5*x)
	for {
		if auxIntToInt32(v.AuxInt) != 11 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [13] x)
	// result: (LEAQ4 x (LEAQ2 x x))
	// 13*x = x + 4*(3*x)
	for {
		if auxIntToInt32(v.AuxInt) != 13 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [19] x)
	// result: (LEAQ2 x (LEAQ8 x x))
	// 19*x = x + 2*(9*x)
	for {
		if auxIntToInt32(v.AuxInt) != 19 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [21] x)
	// result: (LEAQ4 x (LEAQ4 x x))
	// 21*x = x + 4*(5*x)
	for {
		if auxIntToInt32(v.AuxInt) != 21 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [25] x)
	// result: (LEAQ8 x (LEAQ2 x x))
	// 25*x = x + 8*(3*x)
	for {
		if auxIntToInt32(v.AuxInt) != 25 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [27] x)
	// result: (LEAQ8 (LEAQ2 x x) (LEAQ2 x x))
	// 27*x = (3*x) + 8*(3*x); the inner value is shared.
	for {
		if auxIntToInt32(v.AuxInt) != 27 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [37] x)
	// result: (LEAQ4 x (LEAQ8 x x))
	// 37*x = x + 4*(9*x)
	for {
		if auxIntToInt32(v.AuxInt) != 37 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [41] x)
	// result: (LEAQ8 x (LEAQ4 x x))
	// 41*x = x + 8*(5*x)
	for {
		if auxIntToInt32(v.AuxInt) != 41 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [45] x)
	// result: (LEAQ8 (LEAQ4 x x) (LEAQ4 x x))
	// 45*x = (5*x) + 8*(5*x); the inner value is shared.
	for {
		if auxIntToInt32(v.AuxInt) != 45 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [73] x)
	// result: (LEAQ8 x (LEAQ8 x x))
	// 73*x = x + 8*(9*x)
	for {
		if auxIntToInt32(v.AuxInt) != 73 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(x, v0)
		return true
	}
	// match: (MULQconst [81] x)
	// result: (LEAQ8 (LEAQ8 x x) (LEAQ8 x x))
	// 81*x = (9*x) + 8*(9*x); the inner value is shared.
	for {
		if auxIntToInt32(v.AuxInt) != 81 {
			break
		}
		x := v_0
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg2(v0, v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo64(int64(c)+1) && c >= 15
	// result: (SUBQ (SHLQconst [log64(int64(c)+1)] x) x)
	// (2^n - 1)*x = (x << n) - x. int64 math so c+1 cannot overflow int32.
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-1) && c >= 17
	// result: (LEAQ1 (SHLQconst [log32(c-1)] x) x)
	// (2^n + 1)*x = (x << n) + x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-1) && c >= 17) {
			break
		}
		v.reset(OpAMD64LEAQ1)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-2) && c >= 34
	// result: (LEAQ2 (SHLQconst [log32(c-2)] x) x)
	// (2^n + 2)*x = (x << n) + 2*x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-2) && c >= 34) {
			break
		}
		v.reset(OpAMD64LEAQ2)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-4) && c >= 68
	// result: (LEAQ4 (SHLQconst [log32(c-4)] x) x)
	// (2^n + 4)*x = (x << n) + 4*x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-4) && c >= 68) {
			break
		}
		v.reset(OpAMD64LEAQ4)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: isPowerOfTwo32(c-8) && c >= 136
	// result: (LEAQ8 (SHLQconst [log32(c-8)] x) x)
	// (2^n + 8)*x = (x << n) + 8*x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isPowerOfTwo32(c-8) && c >= 136) {
			break
		}
		v.reset(OpAMD64LEAQ8)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
		v0.AddArg(x)
		v.AddArg2(v0, x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%3 == 0 && isPowerOfTwo32(c/3)
	// result: (SHLQconst [log32(c/3)] (LEAQ2 x x))
	// (3*2^n)*x = (3*x) << n
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%5 == 0 && isPowerOfTwo32(c/5)
	// result: (SHLQconst [log32(c/5)] (LEAQ4 x x))
	// (5*2^n)*x = (5*x) << n
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: c%9 == 0 && isPowerOfTwo32(c/9)
	// result: (SHLQconst [log32(c/9)] (LEAQ8 x x))
	// (9*2^n)*x = (9*x) << n
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
		v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
		v0.AddArg2(x, x)
		v.AddArg(v0)
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	// Constant-fold a multiply of two constants (wraps on overflow, as at run time).
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	// The guard excludes math.MinInt32, whose negation overflows int32.
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
15449 func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
15450 v_1 := v.Args[1]
15451 v_0 := v.Args[0]
15452
15453
15454
15455 for {
15456 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15457 x := v_0
15458 l := v_1
15459 if l.Op != OpAMD64MOVSDload {
15460 continue
15461 }
15462 off := auxIntToInt32(l.AuxInt)
15463 sym := auxToSym(l.Aux)
15464 mem := l.Args[1]
15465 ptr := l.Args[0]
15466 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
15467 continue
15468 }
15469 v.reset(OpAMD64MULSDload)
15470 v.AuxInt = int32ToAuxInt(off)
15471 v.Aux = symToAux(sym)
15472 v.AddArg3(x, ptr, mem)
15473 return true
15474 }
15475 break
15476 }
15477 return false
15478 }
15479 func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
15480 v_2 := v.Args[2]
15481 v_1 := v.Args[1]
15482 v_0 := v.Args[0]
15483 b := v.Block
15484 typ := &b.Func.Config.Types
15485
15486
15487
15488 for {
15489 off1 := auxIntToInt32(v.AuxInt)
15490 sym := auxToSym(v.Aux)
15491 val := v_0
15492 if v_1.Op != OpAMD64ADDQconst {
15493 break
15494 }
15495 off2 := auxIntToInt32(v_1.AuxInt)
15496 base := v_1.Args[0]
15497 mem := v_2
15498 if !(is32Bit(int64(off1) + int64(off2))) {
15499 break
15500 }
15501 v.reset(OpAMD64MULSDload)
15502 v.AuxInt = int32ToAuxInt(off1 + off2)
15503 v.Aux = symToAux(sym)
15504 v.AddArg3(val, base, mem)
15505 return true
15506 }
15507
15508
15509
15510 for {
15511 off1 := auxIntToInt32(v.AuxInt)
15512 sym1 := auxToSym(v.Aux)
15513 val := v_0
15514 if v_1.Op != OpAMD64LEAQ {
15515 break
15516 }
15517 off2 := auxIntToInt32(v_1.AuxInt)
15518 sym2 := auxToSym(v_1.Aux)
15519 base := v_1.Args[0]
15520 mem := v_2
15521 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15522 break
15523 }
15524 v.reset(OpAMD64MULSDload)
15525 v.AuxInt = int32ToAuxInt(off1 + off2)
15526 v.Aux = symToAux(mergeSym(sym1, sym2))
15527 v.AddArg3(val, base, mem)
15528 return true
15529 }
15530
15531
15532 for {
15533 off := auxIntToInt32(v.AuxInt)
15534 sym := auxToSym(v.Aux)
15535 x := v_0
15536 ptr := v_1
15537 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
15538 break
15539 }
15540 y := v_2.Args[1]
15541 if ptr != v_2.Args[0] {
15542 break
15543 }
15544 v.reset(OpAMD64MULSD)
15545 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
15546 v0.AddArg(y)
15547 v.AddArg2(x, v0)
15548 return true
15549 }
15550 return false
15551 }
15552 func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
15553 v_1 := v.Args[1]
15554 v_0 := v.Args[0]
15555
15556
15557
15558 for {
15559 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15560 x := v_0
15561 l := v_1
15562 if l.Op != OpAMD64MOVSSload {
15563 continue
15564 }
15565 off := auxIntToInt32(l.AuxInt)
15566 sym := auxToSym(l.Aux)
15567 mem := l.Args[1]
15568 ptr := l.Args[0]
15569 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
15570 continue
15571 }
15572 v.reset(OpAMD64MULSSload)
15573 v.AuxInt = int32ToAuxInt(off)
15574 v.Aux = symToAux(sym)
15575 v.AddArg3(x, ptr, mem)
15576 return true
15577 }
15578 break
15579 }
15580 return false
15581 }
15582 func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
15583 v_2 := v.Args[2]
15584 v_1 := v.Args[1]
15585 v_0 := v.Args[0]
15586 b := v.Block
15587 typ := &b.Func.Config.Types
15588
15589
15590
15591 for {
15592 off1 := auxIntToInt32(v.AuxInt)
15593 sym := auxToSym(v.Aux)
15594 val := v_0
15595 if v_1.Op != OpAMD64ADDQconst {
15596 break
15597 }
15598 off2 := auxIntToInt32(v_1.AuxInt)
15599 base := v_1.Args[0]
15600 mem := v_2
15601 if !(is32Bit(int64(off1) + int64(off2))) {
15602 break
15603 }
15604 v.reset(OpAMD64MULSSload)
15605 v.AuxInt = int32ToAuxInt(off1 + off2)
15606 v.Aux = symToAux(sym)
15607 v.AddArg3(val, base, mem)
15608 return true
15609 }
15610
15611
15612
15613 for {
15614 off1 := auxIntToInt32(v.AuxInt)
15615 sym1 := auxToSym(v.Aux)
15616 val := v_0
15617 if v_1.Op != OpAMD64LEAQ {
15618 break
15619 }
15620 off2 := auxIntToInt32(v_1.AuxInt)
15621 sym2 := auxToSym(v_1.Aux)
15622 base := v_1.Args[0]
15623 mem := v_2
15624 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
15625 break
15626 }
15627 v.reset(OpAMD64MULSSload)
15628 v.AuxInt = int32ToAuxInt(off1 + off2)
15629 v.Aux = symToAux(mergeSym(sym1, sym2))
15630 v.AddArg3(val, base, mem)
15631 return true
15632 }
15633
15634
15635 for {
15636 off := auxIntToInt32(v.AuxInt)
15637 sym := auxToSym(v.Aux)
15638 x := v_0
15639 ptr := v_1
15640 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
15641 break
15642 }
15643 y := v_2.Args[1]
15644 if ptr != v_2.Args[0] {
15645 break
15646 }
15647 v.reset(OpAMD64MULSS)
15648 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
15649 v0.AddArg(y)
15650 v.AddArg2(x, v0)
15651 return true
15652 }
15653 return false
15654 }
15655 func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
15656 v_0 := v.Args[0]
15657
15658
15659 for {
15660 if v_0.Op != OpAMD64NEGL {
15661 break
15662 }
15663 x := v_0.Args[0]
15664 v.copyOf(x)
15665 return true
15666 }
15667
15668
15669
15670 for {
15671 s := v_0
15672 if s.Op != OpAMD64SUBL {
15673 break
15674 }
15675 y := s.Args[1]
15676 x := s.Args[0]
15677 if !(s.Uses == 1) {
15678 break
15679 }
15680 v.reset(OpAMD64SUBL)
15681 v.AddArg2(y, x)
15682 return true
15683 }
15684
15685
15686 for {
15687 if v_0.Op != OpAMD64MOVLconst {
15688 break
15689 }
15690 c := auxIntToInt32(v_0.AuxInt)
15691 v.reset(OpAMD64MOVLconst)
15692 v.AuxInt = int32ToAuxInt(-c)
15693 return true
15694 }
15695 return false
15696 }
15697 func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
15698 v_0 := v.Args[0]
15699
15700
15701 for {
15702 if v_0.Op != OpAMD64NEGQ {
15703 break
15704 }
15705 x := v_0.Args[0]
15706 v.copyOf(x)
15707 return true
15708 }
15709
15710
15711
15712 for {
15713 s := v_0
15714 if s.Op != OpAMD64SUBQ {
15715 break
15716 }
15717 y := s.Args[1]
15718 x := s.Args[0]
15719 if !(s.Uses == 1) {
15720 break
15721 }
15722 v.reset(OpAMD64SUBQ)
15723 v.AddArg2(y, x)
15724 return true
15725 }
15726
15727
15728 for {
15729 if v_0.Op != OpAMD64MOVQconst {
15730 break
15731 }
15732 c := auxIntToInt64(v_0.AuxInt)
15733 v.reset(OpAMD64MOVQconst)
15734 v.AuxInt = int64ToAuxInt(-c)
15735 return true
15736 }
15737
15738
15739
15740 for {
15741 if v_0.Op != OpAMD64ADDQconst {
15742 break
15743 }
15744 c := auxIntToInt32(v_0.AuxInt)
15745 v_0_0 := v_0.Args[0]
15746 if v_0_0.Op != OpAMD64NEGQ {
15747 break
15748 }
15749 x := v_0_0.Args[0]
15750 if !(c != -(1 << 31)) {
15751 break
15752 }
15753 v.reset(OpAMD64ADDQconst)
15754 v.AuxInt = int32ToAuxInt(-c)
15755 v.AddArg(x)
15756 return true
15757 }
15758 return false
15759 }
15760 func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
15761 v_0 := v.Args[0]
15762
15763
15764 for {
15765 if v_0.Op != OpAMD64MOVLconst {
15766 break
15767 }
15768 c := auxIntToInt32(v_0.AuxInt)
15769 v.reset(OpAMD64MOVLconst)
15770 v.AuxInt = int32ToAuxInt(^c)
15771 return true
15772 }
15773 return false
15774 }
15775 func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
15776 v_0 := v.Args[0]
15777
15778
15779 for {
15780 if v_0.Op != OpAMD64MOVQconst {
15781 break
15782 }
15783 c := auxIntToInt64(v_0.AuxInt)
15784 v.reset(OpAMD64MOVQconst)
15785 v.AuxInt = int64ToAuxInt(^c)
15786 return true
15787 }
15788 return false
15789 }
15790 func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
15791 v_1 := v.Args[1]
15792 v_0 := v.Args[0]
15793 b := v.Block
15794 typ := &b.Func.Config.Types
15795
15796
15797 for {
15798 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15799 if v_0.Op != OpAMD64SHLL {
15800 continue
15801 }
15802 y := v_0.Args[1]
15803 v_0_0 := v_0.Args[0]
15804 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
15805 continue
15806 }
15807 x := v_1
15808 v.reset(OpAMD64BTSL)
15809 v.AddArg2(x, y)
15810 return true
15811 }
15812 break
15813 }
15814
15815
15816
15817 for {
15818 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15819 if v_0.Op != OpAMD64MOVLconst {
15820 continue
15821 }
15822 c := auxIntToInt32(v_0.AuxInt)
15823 x := v_1
15824 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
15825 continue
15826 }
15827 v.reset(OpAMD64BTSLconst)
15828 v.AuxInt = int8ToAuxInt(int8(log32(c)))
15829 v.AddArg(x)
15830 return true
15831 }
15832 break
15833 }
15834
15835
15836 for {
15837 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15838 x := v_0
15839 if v_1.Op != OpAMD64MOVLconst {
15840 continue
15841 }
15842 c := auxIntToInt32(v_1.AuxInt)
15843 v.reset(OpAMD64ORLconst)
15844 v.AuxInt = int32ToAuxInt(c)
15845 v.AddArg(x)
15846 return true
15847 }
15848 break
15849 }
15850
15851
15852
15853 for {
15854 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15855 if v_0.Op != OpAMD64SHLLconst {
15856 continue
15857 }
15858 c := auxIntToInt8(v_0.AuxInt)
15859 x := v_0.Args[0]
15860 if v_1.Op != OpAMD64SHRLconst {
15861 continue
15862 }
15863 d := auxIntToInt8(v_1.AuxInt)
15864 if x != v_1.Args[0] || !(d == 32-c) {
15865 continue
15866 }
15867 v.reset(OpAMD64ROLLconst)
15868 v.AuxInt = int8ToAuxInt(c)
15869 v.AddArg(x)
15870 return true
15871 }
15872 break
15873 }
15874
15875
15876
15877 for {
15878 t := v.Type
15879 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15880 if v_0.Op != OpAMD64SHLLconst {
15881 continue
15882 }
15883 c := auxIntToInt8(v_0.AuxInt)
15884 x := v_0.Args[0]
15885 if v_1.Op != OpAMD64SHRWconst {
15886 continue
15887 }
15888 d := auxIntToInt8(v_1.AuxInt)
15889 if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
15890 continue
15891 }
15892 v.reset(OpAMD64ROLWconst)
15893 v.AuxInt = int8ToAuxInt(c)
15894 v.AddArg(x)
15895 return true
15896 }
15897 break
15898 }
15899
15900
15901
15902 for {
15903 t := v.Type
15904 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15905 if v_0.Op != OpAMD64SHLLconst {
15906 continue
15907 }
15908 c := auxIntToInt8(v_0.AuxInt)
15909 x := v_0.Args[0]
15910 if v_1.Op != OpAMD64SHRBconst {
15911 continue
15912 }
15913 d := auxIntToInt8(v_1.AuxInt)
15914 if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
15915 continue
15916 }
15917 v.reset(OpAMD64ROLBconst)
15918 v.AuxInt = int8ToAuxInt(c)
15919 v.AddArg(x)
15920 return true
15921 }
15922 break
15923 }
15924
15925
15926 for {
15927 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15928 if v_0.Op != OpAMD64SHLL {
15929 continue
15930 }
15931 y := v_0.Args[1]
15932 x := v_0.Args[0]
15933 if v_1.Op != OpAMD64ANDL {
15934 continue
15935 }
15936 _ = v_1.Args[1]
15937 v_1_0 := v_1.Args[0]
15938 v_1_1 := v_1.Args[1]
15939 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
15940 if v_1_0.Op != OpAMD64SHRL {
15941 continue
15942 }
15943 _ = v_1_0.Args[1]
15944 if x != v_1_0.Args[0] {
15945 continue
15946 }
15947 v_1_0_1 := v_1_0.Args[1]
15948 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
15949 continue
15950 }
15951 v_1_1_0 := v_1_1.Args[0]
15952 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
15953 continue
15954 }
15955 v_1_1_0_0 := v_1_1_0.Args[0]
15956 if v_1_1_0_0.Op != OpAMD64NEGQ {
15957 continue
15958 }
15959 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
15960 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
15961 continue
15962 }
15963 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
15964 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
15965 continue
15966 }
15967 v.reset(OpAMD64ROLL)
15968 v.AddArg2(x, y)
15969 return true
15970 }
15971 }
15972 break
15973 }
15974
15975
15976 for {
15977 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
15978 if v_0.Op != OpAMD64SHLL {
15979 continue
15980 }
15981 y := v_0.Args[1]
15982 x := v_0.Args[0]
15983 if v_1.Op != OpAMD64ANDL {
15984 continue
15985 }
15986 _ = v_1.Args[1]
15987 v_1_0 := v_1.Args[0]
15988 v_1_1 := v_1.Args[1]
15989 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
15990 if v_1_0.Op != OpAMD64SHRL {
15991 continue
15992 }
15993 _ = v_1_0.Args[1]
15994 if x != v_1_0.Args[0] {
15995 continue
15996 }
15997 v_1_0_1 := v_1_0.Args[1]
15998 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
15999 continue
16000 }
16001 v_1_1_0 := v_1_1.Args[0]
16002 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
16003 continue
16004 }
16005 v_1_1_0_0 := v_1_1_0.Args[0]
16006 if v_1_1_0_0.Op != OpAMD64NEGL {
16007 continue
16008 }
16009 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16010 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
16011 continue
16012 }
16013 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16014 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
16015 continue
16016 }
16017 v.reset(OpAMD64ROLL)
16018 v.AddArg2(x, y)
16019 return true
16020 }
16021 }
16022 break
16023 }
16024
16025
16026 for {
16027 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16028 if v_0.Op != OpAMD64SHRL {
16029 continue
16030 }
16031 y := v_0.Args[1]
16032 x := v_0.Args[0]
16033 if v_1.Op != OpAMD64ANDL {
16034 continue
16035 }
16036 _ = v_1.Args[1]
16037 v_1_0 := v_1.Args[0]
16038 v_1_1 := v_1.Args[1]
16039 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
16040 if v_1_0.Op != OpAMD64SHLL {
16041 continue
16042 }
16043 _ = v_1_0.Args[1]
16044 if x != v_1_0.Args[0] {
16045 continue
16046 }
16047 v_1_0_1 := v_1_0.Args[1]
16048 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
16049 continue
16050 }
16051 v_1_1_0 := v_1_1.Args[0]
16052 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
16053 continue
16054 }
16055 v_1_1_0_0 := v_1_1_0.Args[0]
16056 if v_1_1_0_0.Op != OpAMD64NEGQ {
16057 continue
16058 }
16059 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16060 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
16061 continue
16062 }
16063 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16064 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
16065 continue
16066 }
16067 v.reset(OpAMD64RORL)
16068 v.AddArg2(x, y)
16069 return true
16070 }
16071 }
16072 break
16073 }
16074
16075
16076 for {
16077 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16078 if v_0.Op != OpAMD64SHRL {
16079 continue
16080 }
16081 y := v_0.Args[1]
16082 x := v_0.Args[0]
16083 if v_1.Op != OpAMD64ANDL {
16084 continue
16085 }
16086 _ = v_1.Args[1]
16087 v_1_0 := v_1.Args[0]
16088 v_1_1 := v_1.Args[1]
16089 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
16090 if v_1_0.Op != OpAMD64SHLL {
16091 continue
16092 }
16093 _ = v_1_0.Args[1]
16094 if x != v_1_0.Args[0] {
16095 continue
16096 }
16097 v_1_0_1 := v_1_0.Args[1]
16098 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
16099 continue
16100 }
16101 v_1_1_0 := v_1_1.Args[0]
16102 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
16103 continue
16104 }
16105 v_1_1_0_0 := v_1_1_0.Args[0]
16106 if v_1_1_0_0.Op != OpAMD64NEGL {
16107 continue
16108 }
16109 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16110 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
16111 continue
16112 }
16113 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16114 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
16115 continue
16116 }
16117 v.reset(OpAMD64RORL)
16118 v.AddArg2(x, y)
16119 return true
16120 }
16121 }
16122 break
16123 }
16124
16125
16126
16127 for {
16128 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16129 if v_0.Op != OpAMD64SHLL {
16130 continue
16131 }
16132 _ = v_0.Args[1]
16133 x := v_0.Args[0]
16134 v_0_1 := v_0.Args[1]
16135 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
16136 continue
16137 }
16138 y := v_0_1.Args[0]
16139 if v_1.Op != OpAMD64ANDL {
16140 continue
16141 }
16142 _ = v_1.Args[1]
16143 v_1_0 := v_1.Args[0]
16144 v_1_1 := v_1.Args[1]
16145 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
16146 if v_1_0.Op != OpAMD64SHRW {
16147 continue
16148 }
16149 _ = v_1_0.Args[1]
16150 if x != v_1_0.Args[0] {
16151 continue
16152 }
16153 v_1_0_1 := v_1_0.Args[1]
16154 if v_1_0_1.Op != OpAMD64NEGQ {
16155 continue
16156 }
16157 v_1_0_1_0 := v_1_0_1.Args[0]
16158 if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
16159 continue
16160 }
16161 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
16162 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
16163 continue
16164 }
16165 v_1_1_0 := v_1_1.Args[0]
16166 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
16167 continue
16168 }
16169 v_1_1_0_0 := v_1_1_0.Args[0]
16170 if v_1_1_0_0.Op != OpAMD64NEGQ {
16171 continue
16172 }
16173 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16174 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
16175 continue
16176 }
16177 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16178 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
16179 continue
16180 }
16181 v.reset(OpAMD64ROLW)
16182 v.AddArg2(x, y)
16183 return true
16184 }
16185 }
16186 break
16187 }
16188
16189
16190
16191 for {
16192 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16193 if v_0.Op != OpAMD64SHLL {
16194 continue
16195 }
16196 _ = v_0.Args[1]
16197 x := v_0.Args[0]
16198 v_0_1 := v_0.Args[1]
16199 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
16200 continue
16201 }
16202 y := v_0_1.Args[0]
16203 if v_1.Op != OpAMD64ANDL {
16204 continue
16205 }
16206 _ = v_1.Args[1]
16207 v_1_0 := v_1.Args[0]
16208 v_1_1 := v_1.Args[1]
16209 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
16210 if v_1_0.Op != OpAMD64SHRW {
16211 continue
16212 }
16213 _ = v_1_0.Args[1]
16214 if x != v_1_0.Args[0] {
16215 continue
16216 }
16217 v_1_0_1 := v_1_0.Args[1]
16218 if v_1_0_1.Op != OpAMD64NEGL {
16219 continue
16220 }
16221 v_1_0_1_0 := v_1_0_1.Args[0]
16222 if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
16223 continue
16224 }
16225 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
16226 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
16227 continue
16228 }
16229 v_1_1_0 := v_1_1.Args[0]
16230 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
16231 continue
16232 }
16233 v_1_1_0_0 := v_1_1_0.Args[0]
16234 if v_1_1_0_0.Op != OpAMD64NEGL {
16235 continue
16236 }
16237 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16238 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
16239 continue
16240 }
16241 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16242 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
16243 continue
16244 }
16245 v.reset(OpAMD64ROLW)
16246 v.AddArg2(x, y)
16247 return true
16248 }
16249 }
16250 break
16251 }
16252
16253
16254
16255 for {
16256 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16257 if v_0.Op != OpAMD64SHRW {
16258 continue
16259 }
16260 _ = v_0.Args[1]
16261 x := v_0.Args[0]
16262 v_0_1 := v_0.Args[1]
16263 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
16264 continue
16265 }
16266 y := v_0_1.Args[0]
16267 if v_1.Op != OpAMD64SHLL {
16268 continue
16269 }
16270 _ = v_1.Args[1]
16271 if x != v_1.Args[0] {
16272 continue
16273 }
16274 v_1_1 := v_1.Args[1]
16275 if v_1_1.Op != OpAMD64NEGQ {
16276 continue
16277 }
16278 v_1_1_0 := v_1_1.Args[0]
16279 if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
16280 continue
16281 }
16282 v_1_1_0_0 := v_1_1_0.Args[0]
16283 if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
16284 continue
16285 }
16286 v.reset(OpAMD64RORW)
16287 v.AddArg2(x, y)
16288 return true
16289 }
16290 break
16291 }
16292
16293
16294
16295 for {
16296 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16297 if v_0.Op != OpAMD64SHRW {
16298 continue
16299 }
16300 _ = v_0.Args[1]
16301 x := v_0.Args[0]
16302 v_0_1 := v_0.Args[1]
16303 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
16304 continue
16305 }
16306 y := v_0_1.Args[0]
16307 if v_1.Op != OpAMD64SHLL {
16308 continue
16309 }
16310 _ = v_1.Args[1]
16311 if x != v_1.Args[0] {
16312 continue
16313 }
16314 v_1_1 := v_1.Args[1]
16315 if v_1_1.Op != OpAMD64NEGL {
16316 continue
16317 }
16318 v_1_1_0 := v_1_1.Args[0]
16319 if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
16320 continue
16321 }
16322 v_1_1_0_0 := v_1_1_0.Args[0]
16323 if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
16324 continue
16325 }
16326 v.reset(OpAMD64RORW)
16327 v.AddArg2(x, y)
16328 return true
16329 }
16330 break
16331 }
16332
16333
16334
16335 for {
16336 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16337 if v_0.Op != OpAMD64SHLL {
16338 continue
16339 }
16340 _ = v_0.Args[1]
16341 x := v_0.Args[0]
16342 v_0_1 := v_0.Args[1]
16343 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
16344 continue
16345 }
16346 y := v_0_1.Args[0]
16347 if v_1.Op != OpAMD64ANDL {
16348 continue
16349 }
16350 _ = v_1.Args[1]
16351 v_1_0 := v_1.Args[0]
16352 v_1_1 := v_1.Args[1]
16353 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
16354 if v_1_0.Op != OpAMD64SHRB {
16355 continue
16356 }
16357 _ = v_1_0.Args[1]
16358 if x != v_1_0.Args[0] {
16359 continue
16360 }
16361 v_1_0_1 := v_1_0.Args[1]
16362 if v_1_0_1.Op != OpAMD64NEGQ {
16363 continue
16364 }
16365 v_1_0_1_0 := v_1_0_1.Args[0]
16366 if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
16367 continue
16368 }
16369 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
16370 if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
16371 continue
16372 }
16373 v_1_1_0 := v_1_1.Args[0]
16374 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
16375 continue
16376 }
16377 v_1_1_0_0 := v_1_1_0.Args[0]
16378 if v_1_1_0_0.Op != OpAMD64NEGQ {
16379 continue
16380 }
16381 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16382 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
16383 continue
16384 }
16385 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16386 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
16387 continue
16388 }
16389 v.reset(OpAMD64ROLB)
16390 v.AddArg2(x, y)
16391 return true
16392 }
16393 }
16394 break
16395 }
16396
16397
16398
16399 for {
16400 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16401 if v_0.Op != OpAMD64SHLL {
16402 continue
16403 }
16404 _ = v_0.Args[1]
16405 x := v_0.Args[0]
16406 v_0_1 := v_0.Args[1]
16407 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
16408 continue
16409 }
16410 y := v_0_1.Args[0]
16411 if v_1.Op != OpAMD64ANDL {
16412 continue
16413 }
16414 _ = v_1.Args[1]
16415 v_1_0 := v_1.Args[0]
16416 v_1_1 := v_1.Args[1]
16417 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
16418 if v_1_0.Op != OpAMD64SHRB {
16419 continue
16420 }
16421 _ = v_1_0.Args[1]
16422 if x != v_1_0.Args[0] {
16423 continue
16424 }
16425 v_1_0_1 := v_1_0.Args[1]
16426 if v_1_0_1.Op != OpAMD64NEGL {
16427 continue
16428 }
16429 v_1_0_1_0 := v_1_0_1.Args[0]
16430 if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
16431 continue
16432 }
16433 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
16434 if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
16435 continue
16436 }
16437 v_1_1_0 := v_1_1.Args[0]
16438 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
16439 continue
16440 }
16441 v_1_1_0_0 := v_1_1_0.Args[0]
16442 if v_1_1_0_0.Op != OpAMD64NEGL {
16443 continue
16444 }
16445 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
16446 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
16447 continue
16448 }
16449 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
16450 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
16451 continue
16452 }
16453 v.reset(OpAMD64ROLB)
16454 v.AddArg2(x, y)
16455 return true
16456 }
16457 }
16458 break
16459 }
16460
16461
16462
16463 for {
16464 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16465 if v_0.Op != OpAMD64SHRB {
16466 continue
16467 }
16468 _ = v_0.Args[1]
16469 x := v_0.Args[0]
16470 v_0_1 := v_0.Args[1]
16471 if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
16472 continue
16473 }
16474 y := v_0_1.Args[0]
16475 if v_1.Op != OpAMD64SHLL {
16476 continue
16477 }
16478 _ = v_1.Args[1]
16479 if x != v_1.Args[0] {
16480 continue
16481 }
16482 v_1_1 := v_1.Args[1]
16483 if v_1_1.Op != OpAMD64NEGQ {
16484 continue
16485 }
16486 v_1_1_0 := v_1_1.Args[0]
16487 if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
16488 continue
16489 }
16490 v_1_1_0_0 := v_1_1_0.Args[0]
16491 if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
16492 continue
16493 }
16494 v.reset(OpAMD64RORB)
16495 v.AddArg2(x, y)
16496 return true
16497 }
16498 break
16499 }
16500
16501
16502
16503 for {
16504 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16505 if v_0.Op != OpAMD64SHRB {
16506 continue
16507 }
16508 _ = v_0.Args[1]
16509 x := v_0.Args[0]
16510 v_0_1 := v_0.Args[1]
16511 if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
16512 continue
16513 }
16514 y := v_0_1.Args[0]
16515 if v_1.Op != OpAMD64SHLL {
16516 continue
16517 }
16518 _ = v_1.Args[1]
16519 if x != v_1.Args[0] {
16520 continue
16521 }
16522 v_1_1 := v_1.Args[1]
16523 if v_1_1.Op != OpAMD64NEGL {
16524 continue
16525 }
16526 v_1_1_0 := v_1_1.Args[0]
16527 if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
16528 continue
16529 }
16530 v_1_1_0_0 := v_1_1_0.Args[0]
16531 if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
16532 continue
16533 }
16534 v.reset(OpAMD64RORB)
16535 v.AddArg2(x, y)
16536 return true
16537 }
16538 break
16539 }
16540
16541
16542 for {
16543 x := v_0
16544 if x != v_1 {
16545 break
16546 }
16547 v.copyOf(x)
16548 return true
16549 }
16550
16551
16552
16553 for {
16554 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16555 x0 := v_0
16556 if x0.Op != OpAMD64MOVBload {
16557 continue
16558 }
16559 i0 := auxIntToInt32(x0.AuxInt)
16560 s := auxToSym(x0.Aux)
16561 mem := x0.Args[1]
16562 p := x0.Args[0]
16563 sh := v_1
16564 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
16565 continue
16566 }
16567 x1 := sh.Args[0]
16568 if x1.Op != OpAMD64MOVBload {
16569 continue
16570 }
16571 i1 := auxIntToInt32(x1.AuxInt)
16572 if auxToSym(x1.Aux) != s {
16573 continue
16574 }
16575 _ = x1.Args[1]
16576 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
16577 continue
16578 }
16579 b = mergePoint(b, x0, x1)
16580 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
16581 v.copyOf(v0)
16582 v0.AuxInt = int32ToAuxInt(i0)
16583 v0.Aux = symToAux(s)
16584 v0.AddArg2(p, mem)
16585 return true
16586 }
16587 break
16588 }
16589
16590
16591
16592 for {
16593 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16594 x0 := v_0
16595 if x0.Op != OpAMD64MOVBload {
16596 continue
16597 }
16598 i := auxIntToInt32(x0.AuxInt)
16599 s := auxToSym(x0.Aux)
16600 mem := x0.Args[1]
16601 p0 := x0.Args[0]
16602 sh := v_1
16603 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
16604 continue
16605 }
16606 x1 := sh.Args[0]
16607 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
16608 continue
16609 }
16610 _ = x1.Args[1]
16611 p1 := x1.Args[0]
16612 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
16613 continue
16614 }
16615 b = mergePoint(b, x0, x1)
16616 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
16617 v.copyOf(v0)
16618 v0.AuxInt = int32ToAuxInt(i)
16619 v0.Aux = symToAux(s)
16620 v0.AddArg2(p0, mem)
16621 return true
16622 }
16623 break
16624 }
16625
16626
16627
16628 for {
16629 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16630 x0 := v_0
16631 if x0.Op != OpAMD64MOVWload {
16632 continue
16633 }
16634 i0 := auxIntToInt32(x0.AuxInt)
16635 s := auxToSym(x0.Aux)
16636 mem := x0.Args[1]
16637 p := x0.Args[0]
16638 sh := v_1
16639 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
16640 continue
16641 }
16642 x1 := sh.Args[0]
16643 if x1.Op != OpAMD64MOVWload {
16644 continue
16645 }
16646 i1 := auxIntToInt32(x1.AuxInt)
16647 if auxToSym(x1.Aux) != s {
16648 continue
16649 }
16650 _ = x1.Args[1]
16651 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
16652 continue
16653 }
16654 b = mergePoint(b, x0, x1)
16655 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
16656 v.copyOf(v0)
16657 v0.AuxInt = int32ToAuxInt(i0)
16658 v0.Aux = symToAux(s)
16659 v0.AddArg2(p, mem)
16660 return true
16661 }
16662 break
16663 }
16664
16665
16666
16667 for {
16668 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16669 x0 := v_0
16670 if x0.Op != OpAMD64MOVWload {
16671 continue
16672 }
16673 i := auxIntToInt32(x0.AuxInt)
16674 s := auxToSym(x0.Aux)
16675 mem := x0.Args[1]
16676 p0 := x0.Args[0]
16677 sh := v_1
16678 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
16679 continue
16680 }
16681 x1 := sh.Args[0]
16682 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
16683 continue
16684 }
16685 _ = x1.Args[1]
16686 p1 := x1.Args[0]
16687 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
16688 continue
16689 }
16690 b = mergePoint(b, x0, x1)
16691 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
16692 v.copyOf(v0)
16693 v0.AuxInt = int32ToAuxInt(i)
16694 v0.Aux = symToAux(s)
16695 v0.AddArg2(p0, mem)
16696 return true
16697 }
16698 break
16699 }
16700
16701
16702
16703 for {
16704 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16705 s1 := v_0
16706 if s1.Op != OpAMD64SHLLconst {
16707 continue
16708 }
16709 j1 := auxIntToInt8(s1.AuxInt)
16710 x1 := s1.Args[0]
16711 if x1.Op != OpAMD64MOVBload {
16712 continue
16713 }
16714 i1 := auxIntToInt32(x1.AuxInt)
16715 s := auxToSym(x1.Aux)
16716 mem := x1.Args[1]
16717 p := x1.Args[0]
16718 or := v_1
16719 if or.Op != OpAMD64ORL {
16720 continue
16721 }
16722 _ = or.Args[1]
16723 or_0 := or.Args[0]
16724 or_1 := or.Args[1]
16725 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
16726 s0 := or_0
16727 if s0.Op != OpAMD64SHLLconst {
16728 continue
16729 }
16730 j0 := auxIntToInt8(s0.AuxInt)
16731 x0 := s0.Args[0]
16732 if x0.Op != OpAMD64MOVBload {
16733 continue
16734 }
16735 i0 := auxIntToInt32(x0.AuxInt)
16736 if auxToSym(x0.Aux) != s {
16737 continue
16738 }
16739 _ = x0.Args[1]
16740 if p != x0.Args[0] || mem != x0.Args[1] {
16741 continue
16742 }
16743 y := or_1
16744 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
16745 continue
16746 }
16747 b = mergePoint(b, x0, x1, y)
16748 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
16749 v.copyOf(v0)
16750 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
16751 v1.AuxInt = int8ToAuxInt(j0)
16752 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
16753 v2.AuxInt = int32ToAuxInt(i0)
16754 v2.Aux = symToAux(s)
16755 v2.AddArg2(p, mem)
16756 v1.AddArg(v2)
16757 v0.AddArg2(v1, y)
16758 return true
16759 }
16760 }
16761 break
16762 }
16763
16764
16765
16766 for {
16767 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16768 s1 := v_0
16769 if s1.Op != OpAMD64SHLLconst {
16770 continue
16771 }
16772 j1 := auxIntToInt8(s1.AuxInt)
16773 x1 := s1.Args[0]
16774 if x1.Op != OpAMD64MOVBload {
16775 continue
16776 }
16777 i := auxIntToInt32(x1.AuxInt)
16778 s := auxToSym(x1.Aux)
16779 mem := x1.Args[1]
16780 p1 := x1.Args[0]
16781 or := v_1
16782 if or.Op != OpAMD64ORL {
16783 continue
16784 }
16785 _ = or.Args[1]
16786 or_0 := or.Args[0]
16787 or_1 := or.Args[1]
16788 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
16789 s0 := or_0
16790 if s0.Op != OpAMD64SHLLconst {
16791 continue
16792 }
16793 j0 := auxIntToInt8(s0.AuxInt)
16794 x0 := s0.Args[0]
16795 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
16796 continue
16797 }
16798 _ = x0.Args[1]
16799 p0 := x0.Args[0]
16800 if mem != x0.Args[1] {
16801 continue
16802 }
16803 y := or_1
16804 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
16805 continue
16806 }
16807 b = mergePoint(b, x0, x1, y)
16808 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
16809 v.copyOf(v0)
16810 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
16811 v1.AuxInt = int8ToAuxInt(j0)
16812 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
16813 v2.AuxInt = int32ToAuxInt(i)
16814 v2.Aux = symToAux(s)
16815 v2.AddArg2(p0, mem)
16816 v1.AddArg(v2)
16817 v0.AddArg2(v1, y)
16818 return true
16819 }
16820 }
16821 break
16822 }
16823
16824
16825
16826 for {
16827 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16828 x1 := v_0
16829 if x1.Op != OpAMD64MOVBload {
16830 continue
16831 }
16832 i1 := auxIntToInt32(x1.AuxInt)
16833 s := auxToSym(x1.Aux)
16834 mem := x1.Args[1]
16835 p := x1.Args[0]
16836 sh := v_1
16837 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
16838 continue
16839 }
16840 x0 := sh.Args[0]
16841 if x0.Op != OpAMD64MOVBload {
16842 continue
16843 }
16844 i0 := auxIntToInt32(x0.AuxInt)
16845 if auxToSym(x0.Aux) != s {
16846 continue
16847 }
16848 _ = x0.Args[1]
16849 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
16850 continue
16851 }
16852 b = mergePoint(b, x0, x1)
16853 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
16854 v.copyOf(v0)
16855 v0.AuxInt = int8ToAuxInt(8)
16856 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
16857 v1.AuxInt = int32ToAuxInt(i0)
16858 v1.Aux = symToAux(s)
16859 v1.AddArg2(p, mem)
16860 v0.AddArg(v1)
16861 return true
16862 }
16863 break
16864 }
16865
16866
16867
16868 for {
16869 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16870 x1 := v_0
16871 if x1.Op != OpAMD64MOVBload {
16872 continue
16873 }
16874 i := auxIntToInt32(x1.AuxInt)
16875 s := auxToSym(x1.Aux)
16876 mem := x1.Args[1]
16877 p1 := x1.Args[0]
16878 sh := v_1
16879 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
16880 continue
16881 }
16882 x0 := sh.Args[0]
16883 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
16884 continue
16885 }
16886 _ = x0.Args[1]
16887 p0 := x0.Args[0]
16888 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
16889 continue
16890 }
16891 b = mergePoint(b, x0, x1)
16892 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
16893 v.copyOf(v0)
16894 v0.AuxInt = int8ToAuxInt(8)
16895 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
16896 v1.AuxInt = int32ToAuxInt(i)
16897 v1.Aux = symToAux(s)
16898 v1.AddArg2(p0, mem)
16899 v0.AddArg(v1)
16900 return true
16901 }
16902 break
16903 }
16904
16905
16906
16907 for {
16908 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16909 r1 := v_0
16910 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
16911 continue
16912 }
16913 x1 := r1.Args[0]
16914 if x1.Op != OpAMD64MOVWload {
16915 continue
16916 }
16917 i1 := auxIntToInt32(x1.AuxInt)
16918 s := auxToSym(x1.Aux)
16919 mem := x1.Args[1]
16920 p := x1.Args[0]
16921 sh := v_1
16922 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
16923 continue
16924 }
16925 r0 := sh.Args[0]
16926 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
16927 continue
16928 }
16929 x0 := r0.Args[0]
16930 if x0.Op != OpAMD64MOVWload {
16931 continue
16932 }
16933 i0 := auxIntToInt32(x0.AuxInt)
16934 if auxToSym(x0.Aux) != s {
16935 continue
16936 }
16937 _ = x0.Args[1]
16938 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
16939 continue
16940 }
16941 b = mergePoint(b, x0, x1)
16942 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
16943 v.copyOf(v0)
16944 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
16945 v1.AuxInt = int32ToAuxInt(i0)
16946 v1.Aux = symToAux(s)
16947 v1.AddArg2(p, mem)
16948 v0.AddArg(v1)
16949 return true
16950 }
16951 break
16952 }
16953
16954
16955
16956 for {
16957 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
16958 r1 := v_0
16959 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
16960 continue
16961 }
16962 x1 := r1.Args[0]
16963 if x1.Op != OpAMD64MOVWload {
16964 continue
16965 }
16966 i := auxIntToInt32(x1.AuxInt)
16967 s := auxToSym(x1.Aux)
16968 mem := x1.Args[1]
16969 p1 := x1.Args[0]
16970 sh := v_1
16971 if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
16972 continue
16973 }
16974 r0 := sh.Args[0]
16975 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
16976 continue
16977 }
16978 x0 := r0.Args[0]
16979 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
16980 continue
16981 }
16982 _ = x0.Args[1]
16983 p0 := x0.Args[0]
16984 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
16985 continue
16986 }
16987 b = mergePoint(b, x0, x1)
16988 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
16989 v.copyOf(v0)
16990 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
16991 v1.AuxInt = int32ToAuxInt(i)
16992 v1.Aux = symToAux(s)
16993 v1.AddArg2(p0, mem)
16994 v0.AddArg(v1)
16995 return true
16996 }
16997 break
16998 }
16999
17000
17001
17002 for {
17003 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17004 s0 := v_0
17005 if s0.Op != OpAMD64SHLLconst {
17006 continue
17007 }
17008 j0 := auxIntToInt8(s0.AuxInt)
17009 x0 := s0.Args[0]
17010 if x0.Op != OpAMD64MOVBload {
17011 continue
17012 }
17013 i0 := auxIntToInt32(x0.AuxInt)
17014 s := auxToSym(x0.Aux)
17015 mem := x0.Args[1]
17016 p := x0.Args[0]
17017 or := v_1
17018 if or.Op != OpAMD64ORL {
17019 continue
17020 }
17021 _ = or.Args[1]
17022 or_0 := or.Args[0]
17023 or_1 := or.Args[1]
17024 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
17025 s1 := or_0
17026 if s1.Op != OpAMD64SHLLconst {
17027 continue
17028 }
17029 j1 := auxIntToInt8(s1.AuxInt)
17030 x1 := s1.Args[0]
17031 if x1.Op != OpAMD64MOVBload {
17032 continue
17033 }
17034 i1 := auxIntToInt32(x1.AuxInt)
17035 if auxToSym(x1.Aux) != s {
17036 continue
17037 }
17038 _ = x1.Args[1]
17039 if p != x1.Args[0] || mem != x1.Args[1] {
17040 continue
17041 }
17042 y := or_1
17043 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
17044 continue
17045 }
17046 b = mergePoint(b, x0, x1, y)
17047 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
17048 v.copyOf(v0)
17049 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
17050 v1.AuxInt = int8ToAuxInt(j1)
17051 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
17052 v2.AuxInt = int8ToAuxInt(8)
17053 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17054 v3.AuxInt = int32ToAuxInt(i0)
17055 v3.Aux = symToAux(s)
17056 v3.AddArg2(p, mem)
17057 v2.AddArg(v3)
17058 v1.AddArg(v2)
17059 v0.AddArg2(v1, y)
17060 return true
17061 }
17062 }
17063 break
17064 }
17065
17066
17067
17068 for {
17069 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17070 s0 := v_0
17071 if s0.Op != OpAMD64SHLLconst {
17072 continue
17073 }
17074 j0 := auxIntToInt8(s0.AuxInt)
17075 x0 := s0.Args[0]
17076 if x0.Op != OpAMD64MOVBload {
17077 continue
17078 }
17079 i := auxIntToInt32(x0.AuxInt)
17080 s := auxToSym(x0.Aux)
17081 mem := x0.Args[1]
17082 p0 := x0.Args[0]
17083 or := v_1
17084 if or.Op != OpAMD64ORL {
17085 continue
17086 }
17087 _ = or.Args[1]
17088 or_0 := or.Args[0]
17089 or_1 := or.Args[1]
17090 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
17091 s1 := or_0
17092 if s1.Op != OpAMD64SHLLconst {
17093 continue
17094 }
17095 j1 := auxIntToInt8(s1.AuxInt)
17096 x1 := s1.Args[0]
17097 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17098 continue
17099 }
17100 _ = x1.Args[1]
17101 p1 := x1.Args[0]
17102 if mem != x1.Args[1] {
17103 continue
17104 }
17105 y := or_1
17106 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
17107 continue
17108 }
17109 b = mergePoint(b, x0, x1, y)
17110 v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
17111 v.copyOf(v0)
17112 v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
17113 v1.AuxInt = int8ToAuxInt(j1)
17114 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
17115 v2.AuxInt = int8ToAuxInt(8)
17116 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17117 v3.AuxInt = int32ToAuxInt(i)
17118 v3.Aux = symToAux(s)
17119 v3.AddArg2(p0, mem)
17120 v2.AddArg(v3)
17121 v1.AddArg(v2)
17122 v0.AddArg2(v1, y)
17123 return true
17124 }
17125 }
17126 break
17127 }
17128
17129
17130
17131 for {
17132 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17133 x := v_0
17134 l := v_1
17135 if l.Op != OpAMD64MOVLload {
17136 continue
17137 }
17138 off := auxIntToInt32(l.AuxInt)
17139 sym := auxToSym(l.Aux)
17140 mem := l.Args[1]
17141 ptr := l.Args[0]
17142 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
17143 continue
17144 }
17145 v.reset(OpAMD64ORLload)
17146 v.AuxInt = int32ToAuxInt(off)
17147 v.Aux = symToAux(sym)
17148 v.AddArg3(x, ptr, mem)
17149 return true
17150 }
17151 break
17152 }
17153 return false
17154 }
17155 func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
17156 v_0 := v.Args[0]
17157
17158
17159
17160 for {
17161 c := auxIntToInt32(v.AuxInt)
17162 x := v_0
17163 if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
17164 break
17165 }
17166 v.reset(OpAMD64BTSLconst)
17167 v.AuxInt = int8ToAuxInt(int8(log32(c)))
17168 v.AddArg(x)
17169 return true
17170 }
17171
17172
17173 for {
17174 c := auxIntToInt32(v.AuxInt)
17175 if v_0.Op != OpAMD64ORLconst {
17176 break
17177 }
17178 d := auxIntToInt32(v_0.AuxInt)
17179 x := v_0.Args[0]
17180 v.reset(OpAMD64ORLconst)
17181 v.AuxInt = int32ToAuxInt(c | d)
17182 v.AddArg(x)
17183 return true
17184 }
17185
17186
17187 for {
17188 c := auxIntToInt32(v.AuxInt)
17189 if v_0.Op != OpAMD64BTSLconst {
17190 break
17191 }
17192 d := auxIntToInt8(v_0.AuxInt)
17193 x := v_0.Args[0]
17194 v.reset(OpAMD64ORLconst)
17195 v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
17196 v.AddArg(x)
17197 return true
17198 }
17199
17200
17201
17202 for {
17203 c := auxIntToInt32(v.AuxInt)
17204 x := v_0
17205 if !(c == 0) {
17206 break
17207 }
17208 v.copyOf(x)
17209 return true
17210 }
17211
17212
17213
17214 for {
17215 c := auxIntToInt32(v.AuxInt)
17216 if !(c == -1) {
17217 break
17218 }
17219 v.reset(OpAMD64MOVLconst)
17220 v.AuxInt = int32ToAuxInt(-1)
17221 return true
17222 }
17223
17224
17225 for {
17226 c := auxIntToInt32(v.AuxInt)
17227 if v_0.Op != OpAMD64MOVLconst {
17228 break
17229 }
17230 d := auxIntToInt32(v_0.AuxInt)
17231 v.reset(OpAMD64MOVLconst)
17232 v.AuxInt = int32ToAuxInt(c | d)
17233 return true
17234 }
17235 return false
17236 }
17237 func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
17238 v_1 := v.Args[1]
17239 v_0 := v.Args[0]
17240
17241
17242
17243 for {
17244 valoff1 := auxIntToValAndOff(v.AuxInt)
17245 sym := auxToSym(v.Aux)
17246 if v_0.Op != OpAMD64ADDQconst {
17247 break
17248 }
17249 off2 := auxIntToInt32(v_0.AuxInt)
17250 base := v_0.Args[0]
17251 mem := v_1
17252 if !(ValAndOff(valoff1).canAdd32(off2)) {
17253 break
17254 }
17255 v.reset(OpAMD64ORLconstmodify)
17256 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
17257 v.Aux = symToAux(sym)
17258 v.AddArg2(base, mem)
17259 return true
17260 }
17261
17262
17263
17264 for {
17265 valoff1 := auxIntToValAndOff(v.AuxInt)
17266 sym1 := auxToSym(v.Aux)
17267 if v_0.Op != OpAMD64LEAQ {
17268 break
17269 }
17270 off2 := auxIntToInt32(v_0.AuxInt)
17271 sym2 := auxToSym(v_0.Aux)
17272 base := v_0.Args[0]
17273 mem := v_1
17274 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
17275 break
17276 }
17277 v.reset(OpAMD64ORLconstmodify)
17278 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
17279 v.Aux = symToAux(mergeSym(sym1, sym2))
17280 v.AddArg2(base, mem)
17281 return true
17282 }
17283 return false
17284 }
17285 func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
17286 v_2 := v.Args[2]
17287 v_1 := v.Args[1]
17288 v_0 := v.Args[0]
17289 b := v.Block
17290 typ := &b.Func.Config.Types
17291
17292
17293
17294 for {
17295 off1 := auxIntToInt32(v.AuxInt)
17296 sym := auxToSym(v.Aux)
17297 val := v_0
17298 if v_1.Op != OpAMD64ADDQconst {
17299 break
17300 }
17301 off2 := auxIntToInt32(v_1.AuxInt)
17302 base := v_1.Args[0]
17303 mem := v_2
17304 if !(is32Bit(int64(off1) + int64(off2))) {
17305 break
17306 }
17307 v.reset(OpAMD64ORLload)
17308 v.AuxInt = int32ToAuxInt(off1 + off2)
17309 v.Aux = symToAux(sym)
17310 v.AddArg3(val, base, mem)
17311 return true
17312 }
17313
17314
17315
17316 for {
17317 off1 := auxIntToInt32(v.AuxInt)
17318 sym1 := auxToSym(v.Aux)
17319 val := v_0
17320 if v_1.Op != OpAMD64LEAQ {
17321 break
17322 }
17323 off2 := auxIntToInt32(v_1.AuxInt)
17324 sym2 := auxToSym(v_1.Aux)
17325 base := v_1.Args[0]
17326 mem := v_2
17327 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17328 break
17329 }
17330 v.reset(OpAMD64ORLload)
17331 v.AuxInt = int32ToAuxInt(off1 + off2)
17332 v.Aux = symToAux(mergeSym(sym1, sym2))
17333 v.AddArg3(val, base, mem)
17334 return true
17335 }
17336
17337
17338 for {
17339 off := auxIntToInt32(v.AuxInt)
17340 sym := auxToSym(v.Aux)
17341 x := v_0
17342 ptr := v_1
17343 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
17344 break
17345 }
17346 y := v_2.Args[1]
17347 if ptr != v_2.Args[0] {
17348 break
17349 }
17350 v.reset(OpAMD64ORL)
17351 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
17352 v0.AddArg(y)
17353 v.AddArg2(x, v0)
17354 return true
17355 }
17356 return false
17357 }
17358 func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
17359 v_2 := v.Args[2]
17360 v_1 := v.Args[1]
17361 v_0 := v.Args[0]
17362
17363
17364
17365 for {
17366 off1 := auxIntToInt32(v.AuxInt)
17367 sym := auxToSym(v.Aux)
17368 if v_0.Op != OpAMD64ADDQconst {
17369 break
17370 }
17371 off2 := auxIntToInt32(v_0.AuxInt)
17372 base := v_0.Args[0]
17373 val := v_1
17374 mem := v_2
17375 if !(is32Bit(int64(off1) + int64(off2))) {
17376 break
17377 }
17378 v.reset(OpAMD64ORLmodify)
17379 v.AuxInt = int32ToAuxInt(off1 + off2)
17380 v.Aux = symToAux(sym)
17381 v.AddArg3(base, val, mem)
17382 return true
17383 }
17384
17385
17386
17387 for {
17388 off1 := auxIntToInt32(v.AuxInt)
17389 sym1 := auxToSym(v.Aux)
17390 if v_0.Op != OpAMD64LEAQ {
17391 break
17392 }
17393 off2 := auxIntToInt32(v_0.AuxInt)
17394 sym2 := auxToSym(v_0.Aux)
17395 base := v_0.Args[0]
17396 val := v_1
17397 mem := v_2
17398 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
17399 break
17400 }
17401 v.reset(OpAMD64ORLmodify)
17402 v.AuxInt = int32ToAuxInt(off1 + off2)
17403 v.Aux = symToAux(mergeSym(sym1, sym2))
17404 v.AddArg3(base, val, mem)
17405 return true
17406 }
17407 return false
17408 }
17409 func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
17410 v_1 := v.Args[1]
17411 v_0 := v.Args[0]
17412 b := v.Block
17413 typ := &b.Func.Config.Types
17414
17415
17416 for {
17417 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17418 if v_0.Op != OpAMD64SHLQ {
17419 continue
17420 }
17421 y := v_0.Args[1]
17422 v_0_0 := v_0.Args[0]
17423 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
17424 continue
17425 }
17426 x := v_1
17427 v.reset(OpAMD64BTSQ)
17428 v.AddArg2(x, y)
17429 return true
17430 }
17431 break
17432 }
17433
17434
17435
17436 for {
17437 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17438 if v_0.Op != OpAMD64MOVQconst {
17439 continue
17440 }
17441 c := auxIntToInt64(v_0.AuxInt)
17442 x := v_1
17443 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
17444 continue
17445 }
17446 v.reset(OpAMD64BTSQconst)
17447 v.AuxInt = int8ToAuxInt(int8(log64(c)))
17448 v.AddArg(x)
17449 return true
17450 }
17451 break
17452 }
17453
17454
17455
17456 for {
17457 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17458 x := v_0
17459 if v_1.Op != OpAMD64MOVQconst {
17460 continue
17461 }
17462 c := auxIntToInt64(v_1.AuxInt)
17463 if !(is32Bit(c)) {
17464 continue
17465 }
17466 v.reset(OpAMD64ORQconst)
17467 v.AuxInt = int32ToAuxInt(int32(c))
17468 v.AddArg(x)
17469 return true
17470 }
17471 break
17472 }
17473
17474
17475 for {
17476 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17477 x := v_0
17478 if v_1.Op != OpAMD64MOVLconst {
17479 continue
17480 }
17481 c := auxIntToInt32(v_1.AuxInt)
17482 v.reset(OpAMD64ORQconst)
17483 v.AuxInt = int32ToAuxInt(c)
17484 v.AddArg(x)
17485 return true
17486 }
17487 break
17488 }
17489
17490
17491
17492 for {
17493 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17494 if v_0.Op != OpAMD64SHLQconst {
17495 continue
17496 }
17497 c := auxIntToInt8(v_0.AuxInt)
17498 x := v_0.Args[0]
17499 if v_1.Op != OpAMD64SHRQconst {
17500 continue
17501 }
17502 d := auxIntToInt8(v_1.AuxInt)
17503 if x != v_1.Args[0] || !(d == 64-c) {
17504 continue
17505 }
17506 v.reset(OpAMD64ROLQconst)
17507 v.AuxInt = int8ToAuxInt(c)
17508 v.AddArg(x)
17509 return true
17510 }
17511 break
17512 }
17513
17514
17515 for {
17516 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17517 if v_0.Op != OpAMD64SHLQ {
17518 continue
17519 }
17520 y := v_0.Args[1]
17521 x := v_0.Args[0]
17522 if v_1.Op != OpAMD64ANDQ {
17523 continue
17524 }
17525 _ = v_1.Args[1]
17526 v_1_0 := v_1.Args[0]
17527 v_1_1 := v_1.Args[1]
17528 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17529 if v_1_0.Op != OpAMD64SHRQ {
17530 continue
17531 }
17532 _ = v_1_0.Args[1]
17533 if x != v_1_0.Args[0] {
17534 continue
17535 }
17536 v_1_0_1 := v_1_0.Args[1]
17537 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
17538 continue
17539 }
17540 v_1_1_0 := v_1_1.Args[0]
17541 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
17542 continue
17543 }
17544 v_1_1_0_0 := v_1_1_0.Args[0]
17545 if v_1_1_0_0.Op != OpAMD64NEGQ {
17546 continue
17547 }
17548 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17549 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
17550 continue
17551 }
17552 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17553 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
17554 continue
17555 }
17556 v.reset(OpAMD64ROLQ)
17557 v.AddArg2(x, y)
17558 return true
17559 }
17560 }
17561 break
17562 }
17563
17564
17565 for {
17566 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17567 if v_0.Op != OpAMD64SHLQ {
17568 continue
17569 }
17570 y := v_0.Args[1]
17571 x := v_0.Args[0]
17572 if v_1.Op != OpAMD64ANDQ {
17573 continue
17574 }
17575 _ = v_1.Args[1]
17576 v_1_0 := v_1.Args[0]
17577 v_1_1 := v_1.Args[1]
17578 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17579 if v_1_0.Op != OpAMD64SHRQ {
17580 continue
17581 }
17582 _ = v_1_0.Args[1]
17583 if x != v_1_0.Args[0] {
17584 continue
17585 }
17586 v_1_0_1 := v_1_0.Args[1]
17587 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
17588 continue
17589 }
17590 v_1_1_0 := v_1_1.Args[0]
17591 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
17592 continue
17593 }
17594 v_1_1_0_0 := v_1_1_0.Args[0]
17595 if v_1_1_0_0.Op != OpAMD64NEGL {
17596 continue
17597 }
17598 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17599 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
17600 continue
17601 }
17602 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17603 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
17604 continue
17605 }
17606 v.reset(OpAMD64ROLQ)
17607 v.AddArg2(x, y)
17608 return true
17609 }
17610 }
17611 break
17612 }
17613
17614
17615 for {
17616 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17617 if v_0.Op != OpAMD64SHRQ {
17618 continue
17619 }
17620 y := v_0.Args[1]
17621 x := v_0.Args[0]
17622 if v_1.Op != OpAMD64ANDQ {
17623 continue
17624 }
17625 _ = v_1.Args[1]
17626 v_1_0 := v_1.Args[0]
17627 v_1_1 := v_1.Args[1]
17628 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17629 if v_1_0.Op != OpAMD64SHLQ {
17630 continue
17631 }
17632 _ = v_1_0.Args[1]
17633 if x != v_1_0.Args[0] {
17634 continue
17635 }
17636 v_1_0_1 := v_1_0.Args[1]
17637 if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
17638 continue
17639 }
17640 v_1_1_0 := v_1_1.Args[0]
17641 if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
17642 continue
17643 }
17644 v_1_1_0_0 := v_1_1_0.Args[0]
17645 if v_1_1_0_0.Op != OpAMD64NEGQ {
17646 continue
17647 }
17648 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17649 if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
17650 continue
17651 }
17652 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17653 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
17654 continue
17655 }
17656 v.reset(OpAMD64RORQ)
17657 v.AddArg2(x, y)
17658 return true
17659 }
17660 }
17661 break
17662 }
17663
17664
17665 for {
17666 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17667 if v_0.Op != OpAMD64SHRQ {
17668 continue
17669 }
17670 y := v_0.Args[1]
17671 x := v_0.Args[0]
17672 if v_1.Op != OpAMD64ANDQ {
17673 continue
17674 }
17675 _ = v_1.Args[1]
17676 v_1_0 := v_1.Args[0]
17677 v_1_1 := v_1.Args[1]
17678 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
17679 if v_1_0.Op != OpAMD64SHLQ {
17680 continue
17681 }
17682 _ = v_1_0.Args[1]
17683 if x != v_1_0.Args[0] {
17684 continue
17685 }
17686 v_1_0_1 := v_1_0.Args[1]
17687 if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
17688 continue
17689 }
17690 v_1_1_0 := v_1_1.Args[0]
17691 if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
17692 continue
17693 }
17694 v_1_1_0_0 := v_1_1_0.Args[0]
17695 if v_1_1_0_0.Op != OpAMD64NEGL {
17696 continue
17697 }
17698 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
17699 if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
17700 continue
17701 }
17702 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
17703 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
17704 continue
17705 }
17706 v.reset(OpAMD64RORQ)
17707 v.AddArg2(x, y)
17708 return true
17709 }
17710 }
17711 break
17712 }
17713
17714
17715 for {
17716 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17717 if v_0.Op != OpAMD64SHRQ {
17718 continue
17719 }
17720 bits := v_0.Args[1]
17721 lo := v_0.Args[0]
17722 if v_1.Op != OpAMD64SHLQ {
17723 continue
17724 }
17725 _ = v_1.Args[1]
17726 hi := v_1.Args[0]
17727 v_1_1 := v_1.Args[1]
17728 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
17729 continue
17730 }
17731 v.reset(OpAMD64SHRDQ)
17732 v.AddArg3(lo, hi, bits)
17733 return true
17734 }
17735 break
17736 }
17737
17738
17739 for {
17740 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17741 if v_0.Op != OpAMD64SHLQ {
17742 continue
17743 }
17744 bits := v_0.Args[1]
17745 lo := v_0.Args[0]
17746 if v_1.Op != OpAMD64SHRQ {
17747 continue
17748 }
17749 _ = v_1.Args[1]
17750 hi := v_1.Args[0]
17751 v_1_1 := v_1.Args[1]
17752 if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
17753 continue
17754 }
17755 v.reset(OpAMD64SHLDQ)
17756 v.AddArg3(lo, hi, bits)
17757 return true
17758 }
17759 break
17760 }
17761
17762
17763 for {
17764 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17765 if v_0.Op != OpAMD64MOVQconst {
17766 continue
17767 }
17768 c := auxIntToInt64(v_0.AuxInt)
17769 if v_1.Op != OpAMD64MOVQconst {
17770 continue
17771 }
17772 d := auxIntToInt64(v_1.AuxInt)
17773 v.reset(OpAMD64MOVQconst)
17774 v.AuxInt = int64ToAuxInt(c | d)
17775 return true
17776 }
17777 break
17778 }
17779
17780
17781 for {
17782 x := v_0
17783 if x != v_1 {
17784 break
17785 }
17786 v.copyOf(x)
17787 return true
17788 }
17789
17790
17791
17792 for {
17793 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17794 x0 := v_0
17795 if x0.Op != OpAMD64MOVBload {
17796 continue
17797 }
17798 i0 := auxIntToInt32(x0.AuxInt)
17799 s := auxToSym(x0.Aux)
17800 mem := x0.Args[1]
17801 p := x0.Args[0]
17802 sh := v_1
17803 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
17804 continue
17805 }
17806 x1 := sh.Args[0]
17807 if x1.Op != OpAMD64MOVBload {
17808 continue
17809 }
17810 i1 := auxIntToInt32(x1.AuxInt)
17811 if auxToSym(x1.Aux) != s {
17812 continue
17813 }
17814 _ = x1.Args[1]
17815 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17816 continue
17817 }
17818 b = mergePoint(b, x0, x1)
17819 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17820 v.copyOf(v0)
17821 v0.AuxInt = int32ToAuxInt(i0)
17822 v0.Aux = symToAux(s)
17823 v0.AddArg2(p, mem)
17824 return true
17825 }
17826 break
17827 }
17828
17829
17830
17831 for {
17832 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17833 x0 := v_0
17834 if x0.Op != OpAMD64MOVBload {
17835 continue
17836 }
17837 i := auxIntToInt32(x0.AuxInt)
17838 s := auxToSym(x0.Aux)
17839 mem := x0.Args[1]
17840 p0 := x0.Args[0]
17841 sh := v_1
17842 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
17843 continue
17844 }
17845 x1 := sh.Args[0]
17846 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17847 continue
17848 }
17849 _ = x1.Args[1]
17850 p1 := x1.Args[0]
17851 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17852 continue
17853 }
17854 b = mergePoint(b, x0, x1)
17855 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
17856 v.copyOf(v0)
17857 v0.AuxInt = int32ToAuxInt(i)
17858 v0.Aux = symToAux(s)
17859 v0.AddArg2(p0, mem)
17860 return true
17861 }
17862 break
17863 }
17864
17865
17866
17867 for {
17868 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17869 x0 := v_0
17870 if x0.Op != OpAMD64MOVWload {
17871 continue
17872 }
17873 i0 := auxIntToInt32(x0.AuxInt)
17874 s := auxToSym(x0.Aux)
17875 mem := x0.Args[1]
17876 p := x0.Args[0]
17877 sh := v_1
17878 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
17879 continue
17880 }
17881 x1 := sh.Args[0]
17882 if x1.Op != OpAMD64MOVWload {
17883 continue
17884 }
17885 i1 := auxIntToInt32(x1.AuxInt)
17886 if auxToSym(x1.Aux) != s {
17887 continue
17888 }
17889 _ = x1.Args[1]
17890 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17891 continue
17892 }
17893 b = mergePoint(b, x0, x1)
17894 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
17895 v.copyOf(v0)
17896 v0.AuxInt = int32ToAuxInt(i0)
17897 v0.Aux = symToAux(s)
17898 v0.AddArg2(p, mem)
17899 return true
17900 }
17901 break
17902 }
17903
17904
17905
17906 for {
17907 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17908 x0 := v_0
17909 if x0.Op != OpAMD64MOVWload {
17910 continue
17911 }
17912 i := auxIntToInt32(x0.AuxInt)
17913 s := auxToSym(x0.Aux)
17914 mem := x0.Args[1]
17915 p0 := x0.Args[0]
17916 sh := v_1
17917 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
17918 continue
17919 }
17920 x1 := sh.Args[0]
17921 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17922 continue
17923 }
17924 _ = x1.Args[1]
17925 p1 := x1.Args[0]
17926 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17927 continue
17928 }
17929 b = mergePoint(b, x0, x1)
17930 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
17931 v.copyOf(v0)
17932 v0.AuxInt = int32ToAuxInt(i)
17933 v0.Aux = symToAux(s)
17934 v0.AddArg2(p0, mem)
17935 return true
17936 }
17937 break
17938 }
17939
17940
17941
17942 for {
17943 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17944 x0 := v_0
17945 if x0.Op != OpAMD64MOVLload {
17946 continue
17947 }
17948 i0 := auxIntToInt32(x0.AuxInt)
17949 s := auxToSym(x0.Aux)
17950 mem := x0.Args[1]
17951 p := x0.Args[0]
17952 sh := v_1
17953 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
17954 continue
17955 }
17956 x1 := sh.Args[0]
17957 if x1.Op != OpAMD64MOVLload {
17958 continue
17959 }
17960 i1 := auxIntToInt32(x1.AuxInt)
17961 if auxToSym(x1.Aux) != s {
17962 continue
17963 }
17964 _ = x1.Args[1]
17965 if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
17966 continue
17967 }
17968 b = mergePoint(b, x0, x1)
17969 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
17970 v.copyOf(v0)
17971 v0.AuxInt = int32ToAuxInt(i0)
17972 v0.Aux = symToAux(s)
17973 v0.AddArg2(p, mem)
17974 return true
17975 }
17976 break
17977 }
17978
17979
17980
17981 for {
17982 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
17983 x0 := v_0
17984 if x0.Op != OpAMD64MOVLload {
17985 continue
17986 }
17987 i := auxIntToInt32(x0.AuxInt)
17988 s := auxToSym(x0.Aux)
17989 mem := x0.Args[1]
17990 p0 := x0.Args[0]
17991 sh := v_1
17992 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
17993 continue
17994 }
17995 x1 := sh.Args[0]
17996 if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
17997 continue
17998 }
17999 _ = x1.Args[1]
18000 p1 := x1.Args[0]
18001 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18002 continue
18003 }
18004 b = mergePoint(b, x0, x1)
18005 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
18006 v.copyOf(v0)
18007 v0.AuxInt = int32ToAuxInt(i)
18008 v0.Aux = symToAux(s)
18009 v0.AddArg2(p0, mem)
18010 return true
18011 }
18012 break
18013 }
18014
18015
18016
18017 for {
18018 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18019 s1 := v_0
18020 if s1.Op != OpAMD64SHLQconst {
18021 continue
18022 }
18023 j1 := auxIntToInt8(s1.AuxInt)
18024 x1 := s1.Args[0]
18025 if x1.Op != OpAMD64MOVBload {
18026 continue
18027 }
18028 i1 := auxIntToInt32(x1.AuxInt)
18029 s := auxToSym(x1.Aux)
18030 mem := x1.Args[1]
18031 p := x1.Args[0]
18032 or := v_1
18033 if or.Op != OpAMD64ORQ {
18034 continue
18035 }
18036 _ = or.Args[1]
18037 or_0 := or.Args[0]
18038 or_1 := or.Args[1]
18039 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18040 s0 := or_0
18041 if s0.Op != OpAMD64SHLQconst {
18042 continue
18043 }
18044 j0 := auxIntToInt8(s0.AuxInt)
18045 x0 := s0.Args[0]
18046 if x0.Op != OpAMD64MOVBload {
18047 continue
18048 }
18049 i0 := auxIntToInt32(x0.AuxInt)
18050 if auxToSym(x0.Aux) != s {
18051 continue
18052 }
18053 _ = x0.Args[1]
18054 if p != x0.Args[0] || mem != x0.Args[1] {
18055 continue
18056 }
18057 y := or_1
18058 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18059 continue
18060 }
18061 b = mergePoint(b, x0, x1, y)
18062 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
18063 v.copyOf(v0)
18064 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
18065 v1.AuxInt = int8ToAuxInt(j0)
18066 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
18067 v2.AuxInt = int32ToAuxInt(i0)
18068 v2.Aux = symToAux(s)
18069 v2.AddArg2(p, mem)
18070 v1.AddArg(v2)
18071 v0.AddArg2(v1, y)
18072 return true
18073 }
18074 }
18075 break
18076 }
18077
18078
18079
18080 for {
18081 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18082 s1 := v_0
18083 if s1.Op != OpAMD64SHLQconst {
18084 continue
18085 }
18086 j1 := auxIntToInt8(s1.AuxInt)
18087 x1 := s1.Args[0]
18088 if x1.Op != OpAMD64MOVBload {
18089 continue
18090 }
18091 i := auxIntToInt32(x1.AuxInt)
18092 s := auxToSym(x1.Aux)
18093 mem := x1.Args[1]
18094 p1 := x1.Args[0]
18095 or := v_1
18096 if or.Op != OpAMD64ORQ {
18097 continue
18098 }
18099 _ = or.Args[1]
18100 or_0 := or.Args[0]
18101 or_1 := or.Args[1]
18102 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18103 s0 := or_0
18104 if s0.Op != OpAMD64SHLQconst {
18105 continue
18106 }
18107 j0 := auxIntToInt8(s0.AuxInt)
18108 x0 := s0.Args[0]
18109 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18110 continue
18111 }
18112 _ = x0.Args[1]
18113 p0 := x0.Args[0]
18114 if mem != x0.Args[1] {
18115 continue
18116 }
18117 y := or_1
18118 if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18119 continue
18120 }
18121 b = mergePoint(b, x0, x1, y)
18122 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
18123 v.copyOf(v0)
18124 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
18125 v1.AuxInt = int8ToAuxInt(j0)
18126 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
18127 v2.AuxInt = int32ToAuxInt(i)
18128 v2.Aux = symToAux(s)
18129 v2.AddArg2(p0, mem)
18130 v1.AddArg(v2)
18131 v0.AddArg2(v1, y)
18132 return true
18133 }
18134 }
18135 break
18136 }
18137
18138
18139
18140 for {
18141 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18142 s1 := v_0
18143 if s1.Op != OpAMD64SHLQconst {
18144 continue
18145 }
18146 j1 := auxIntToInt8(s1.AuxInt)
18147 x1 := s1.Args[0]
18148 if x1.Op != OpAMD64MOVWload {
18149 continue
18150 }
18151 i1 := auxIntToInt32(x1.AuxInt)
18152 s := auxToSym(x1.Aux)
18153 mem := x1.Args[1]
18154 p := x1.Args[0]
18155 or := v_1
18156 if or.Op != OpAMD64ORQ {
18157 continue
18158 }
18159 _ = or.Args[1]
18160 or_0 := or.Args[0]
18161 or_1 := or.Args[1]
18162 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18163 s0 := or_0
18164 if s0.Op != OpAMD64SHLQconst {
18165 continue
18166 }
18167 j0 := auxIntToInt8(s0.AuxInt)
18168 x0 := s0.Args[0]
18169 if x0.Op != OpAMD64MOVWload {
18170 continue
18171 }
18172 i0 := auxIntToInt32(x0.AuxInt)
18173 if auxToSym(x0.Aux) != s {
18174 continue
18175 }
18176 _ = x0.Args[1]
18177 if p != x0.Args[0] || mem != x0.Args[1] {
18178 continue
18179 }
18180 y := or_1
18181 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18182 continue
18183 }
18184 b = mergePoint(b, x0, x1, y)
18185 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
18186 v.copyOf(v0)
18187 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
18188 v1.AuxInt = int8ToAuxInt(j0)
18189 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18190 v2.AuxInt = int32ToAuxInt(i0)
18191 v2.Aux = symToAux(s)
18192 v2.AddArg2(p, mem)
18193 v1.AddArg(v2)
18194 v0.AddArg2(v1, y)
18195 return true
18196 }
18197 }
18198 break
18199 }
18200
18201
18202
18203 for {
18204 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18205 s1 := v_0
18206 if s1.Op != OpAMD64SHLQconst {
18207 continue
18208 }
18209 j1 := auxIntToInt8(s1.AuxInt)
18210 x1 := s1.Args[0]
18211 if x1.Op != OpAMD64MOVWload {
18212 continue
18213 }
18214 i := auxIntToInt32(x1.AuxInt)
18215 s := auxToSym(x1.Aux)
18216 mem := x1.Args[1]
18217 p1 := x1.Args[0]
18218 or := v_1
18219 if or.Op != OpAMD64ORQ {
18220 continue
18221 }
18222 _ = or.Args[1]
18223 or_0 := or.Args[0]
18224 or_1 := or.Args[1]
18225 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18226 s0 := or_0
18227 if s0.Op != OpAMD64SHLQconst {
18228 continue
18229 }
18230 j0 := auxIntToInt8(s0.AuxInt)
18231 x0 := s0.Args[0]
18232 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18233 continue
18234 }
18235 _ = x0.Args[1]
18236 p0 := x0.Args[0]
18237 if mem != x0.Args[1] {
18238 continue
18239 }
18240 y := or_1
18241 if !(j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18242 continue
18243 }
18244 b = mergePoint(b, x0, x1, y)
18245 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
18246 v.copyOf(v0)
18247 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
18248 v1.AuxInt = int8ToAuxInt(j0)
18249 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18250 v2.AuxInt = int32ToAuxInt(i)
18251 v2.Aux = symToAux(s)
18252 v2.AddArg2(p0, mem)
18253 v1.AddArg(v2)
18254 v0.AddArg2(v1, y)
18255 return true
18256 }
18257 }
18258 break
18259 }
18260
18261
18262
18263 for {
18264 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18265 x1 := v_0
18266 if x1.Op != OpAMD64MOVBload {
18267 continue
18268 }
18269 i1 := auxIntToInt32(x1.AuxInt)
18270 s := auxToSym(x1.Aux)
18271 mem := x1.Args[1]
18272 p := x1.Args[0]
18273 sh := v_1
18274 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
18275 continue
18276 }
18277 x0 := sh.Args[0]
18278 if x0.Op != OpAMD64MOVBload {
18279 continue
18280 }
18281 i0 := auxIntToInt32(x0.AuxInt)
18282 if auxToSym(x0.Aux) != s {
18283 continue
18284 }
18285 _ = x0.Args[1]
18286 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18287 continue
18288 }
18289 b = mergePoint(b, x0, x1)
18290 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
18291 v.copyOf(v0)
18292 v0.AuxInt = int8ToAuxInt(8)
18293 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
18294 v1.AuxInt = int32ToAuxInt(i0)
18295 v1.Aux = symToAux(s)
18296 v1.AddArg2(p, mem)
18297 v0.AddArg(v1)
18298 return true
18299 }
18300 break
18301 }
18302
18303
18304
18305 for {
18306 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18307 x1 := v_0
18308 if x1.Op != OpAMD64MOVBload {
18309 continue
18310 }
18311 i := auxIntToInt32(x1.AuxInt)
18312 s := auxToSym(x1.Aux)
18313 mem := x1.Args[1]
18314 p1 := x1.Args[0]
18315 sh := v_1
18316 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
18317 continue
18318 }
18319 x0 := sh.Args[0]
18320 if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18321 continue
18322 }
18323 _ = x0.Args[1]
18324 p0 := x0.Args[0]
18325 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18326 continue
18327 }
18328 b = mergePoint(b, x0, x1)
18329 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
18330 v.copyOf(v0)
18331 v0.AuxInt = int8ToAuxInt(8)
18332 v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
18333 v1.AuxInt = int32ToAuxInt(i)
18334 v1.Aux = symToAux(s)
18335 v1.AddArg2(p0, mem)
18336 v0.AddArg(v1)
18337 return true
18338 }
18339 break
18340 }
18341
18342
18343
18344 for {
18345 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18346 r1 := v_0
18347 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18348 continue
18349 }
18350 x1 := r1.Args[0]
18351 if x1.Op != OpAMD64MOVWload {
18352 continue
18353 }
18354 i1 := auxIntToInt32(x1.AuxInt)
18355 s := auxToSym(x1.Aux)
18356 mem := x1.Args[1]
18357 p := x1.Args[0]
18358 sh := v_1
18359 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
18360 continue
18361 }
18362 r0 := sh.Args[0]
18363 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18364 continue
18365 }
18366 x0 := r0.Args[0]
18367 if x0.Op != OpAMD64MOVWload {
18368 continue
18369 }
18370 i0 := auxIntToInt32(x0.AuxInt)
18371 if auxToSym(x0.Aux) != s {
18372 continue
18373 }
18374 _ = x0.Args[1]
18375 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18376 continue
18377 }
18378 b = mergePoint(b, x0, x1)
18379 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
18380 v.copyOf(v0)
18381 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18382 v1.AuxInt = int32ToAuxInt(i0)
18383 v1.Aux = symToAux(s)
18384 v1.AddArg2(p, mem)
18385 v0.AddArg(v1)
18386 return true
18387 }
18388 break
18389 }
18390
18391
18392
18393 for {
18394 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18395 r1 := v_0
18396 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18397 continue
18398 }
18399 x1 := r1.Args[0]
18400 if x1.Op != OpAMD64MOVWload {
18401 continue
18402 }
18403 i := auxIntToInt32(x1.AuxInt)
18404 s := auxToSym(x1.Aux)
18405 mem := x1.Args[1]
18406 p1 := x1.Args[0]
18407 sh := v_1
18408 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
18409 continue
18410 }
18411 r0 := sh.Args[0]
18412 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18413 continue
18414 }
18415 x0 := r0.Args[0]
18416 if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18417 continue
18418 }
18419 _ = x0.Args[1]
18420 p0 := x0.Args[0]
18421 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18422 continue
18423 }
18424 b = mergePoint(b, x0, x1)
18425 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
18426 v.copyOf(v0)
18427 v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
18428 v1.AuxInt = int32ToAuxInt(i)
18429 v1.Aux = symToAux(s)
18430 v1.AddArg2(p0, mem)
18431 v0.AddArg(v1)
18432 return true
18433 }
18434 break
18435 }
18436
18437
18438
18439 for {
18440 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18441 r1 := v_0
18442 if r1.Op != OpAMD64BSWAPL {
18443 continue
18444 }
18445 x1 := r1.Args[0]
18446 if x1.Op != OpAMD64MOVLload {
18447 continue
18448 }
18449 i1 := auxIntToInt32(x1.AuxInt)
18450 s := auxToSym(x1.Aux)
18451 mem := x1.Args[1]
18452 p := x1.Args[0]
18453 sh := v_1
18454 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
18455 continue
18456 }
18457 r0 := sh.Args[0]
18458 if r0.Op != OpAMD64BSWAPL {
18459 continue
18460 }
18461 x0 := r0.Args[0]
18462 if x0.Op != OpAMD64MOVLload {
18463 continue
18464 }
18465 i0 := auxIntToInt32(x0.AuxInt)
18466 if auxToSym(x0.Aux) != s {
18467 continue
18468 }
18469 _ = x0.Args[1]
18470 if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18471 continue
18472 }
18473 b = mergePoint(b, x0, x1)
18474 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
18475 v.copyOf(v0)
18476 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
18477 v1.AuxInt = int32ToAuxInt(i0)
18478 v1.Aux = symToAux(s)
18479 v1.AddArg2(p, mem)
18480 v0.AddArg(v1)
18481 return true
18482 }
18483 break
18484 }
18485
18486
18487
18488 for {
18489 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18490 r1 := v_0
18491 if r1.Op != OpAMD64BSWAPL {
18492 continue
18493 }
18494 x1 := r1.Args[0]
18495 if x1.Op != OpAMD64MOVLload {
18496 continue
18497 }
18498 i := auxIntToInt32(x1.AuxInt)
18499 s := auxToSym(x1.Aux)
18500 mem := x1.Args[1]
18501 p1 := x1.Args[0]
18502 sh := v_1
18503 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
18504 continue
18505 }
18506 r0 := sh.Args[0]
18507 if r0.Op != OpAMD64BSWAPL {
18508 continue
18509 }
18510 x0 := r0.Args[0]
18511 if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
18512 continue
18513 }
18514 _ = x0.Args[1]
18515 p0 := x0.Args[0]
18516 if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
18517 continue
18518 }
18519 b = mergePoint(b, x0, x1)
18520 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
18521 v.copyOf(v0)
18522 v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
18523 v1.AuxInt = int32ToAuxInt(i)
18524 v1.Aux = symToAux(s)
18525 v1.AddArg2(p0, mem)
18526 v0.AddArg(v1)
18527 return true
18528 }
18529 break
18530 }
18531
18532
18533
18534 for {
18535 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18536 s0 := v_0
18537 if s0.Op != OpAMD64SHLQconst {
18538 continue
18539 }
18540 j0 := auxIntToInt8(s0.AuxInt)
18541 x0 := s0.Args[0]
18542 if x0.Op != OpAMD64MOVBload {
18543 continue
18544 }
18545 i0 := auxIntToInt32(x0.AuxInt)
18546 s := auxToSym(x0.Aux)
18547 mem := x0.Args[1]
18548 p := x0.Args[0]
18549 or := v_1
18550 if or.Op != OpAMD64ORQ {
18551 continue
18552 }
18553 _ = or.Args[1]
18554 or_0 := or.Args[0]
18555 or_1 := or.Args[1]
18556 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18557 s1 := or_0
18558 if s1.Op != OpAMD64SHLQconst {
18559 continue
18560 }
18561 j1 := auxIntToInt8(s1.AuxInt)
18562 x1 := s1.Args[0]
18563 if x1.Op != OpAMD64MOVBload {
18564 continue
18565 }
18566 i1 := auxIntToInt32(x1.AuxInt)
18567 if auxToSym(x1.Aux) != s {
18568 continue
18569 }
18570 _ = x1.Args[1]
18571 if p != x1.Args[0] || mem != x1.Args[1] {
18572 continue
18573 }
18574 y := or_1
18575 if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18576 continue
18577 }
18578 b = mergePoint(b, x0, x1, y)
18579 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
18580 v.copyOf(v0)
18581 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
18582 v1.AuxInt = int8ToAuxInt(j1)
18583 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
18584 v2.AuxInt = int8ToAuxInt(8)
18585 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18586 v3.AuxInt = int32ToAuxInt(i0)
18587 v3.Aux = symToAux(s)
18588 v3.AddArg2(p, mem)
18589 v2.AddArg(v3)
18590 v1.AddArg(v2)
18591 v0.AddArg2(v1, y)
18592 return true
18593 }
18594 }
18595 break
18596 }
18597
18598
18599
18600 for {
18601 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18602 s0 := v_0
18603 if s0.Op != OpAMD64SHLQconst {
18604 continue
18605 }
18606 j0 := auxIntToInt8(s0.AuxInt)
18607 x0 := s0.Args[0]
18608 if x0.Op != OpAMD64MOVBload {
18609 continue
18610 }
18611 i := auxIntToInt32(x0.AuxInt)
18612 s := auxToSym(x0.Aux)
18613 mem := x0.Args[1]
18614 p0 := x0.Args[0]
18615 or := v_1
18616 if or.Op != OpAMD64ORQ {
18617 continue
18618 }
18619 _ = or.Args[1]
18620 or_0 := or.Args[0]
18621 or_1 := or.Args[1]
18622 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18623 s1 := or_0
18624 if s1.Op != OpAMD64SHLQconst {
18625 continue
18626 }
18627 j1 := auxIntToInt8(s1.AuxInt)
18628 x1 := s1.Args[0]
18629 if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18630 continue
18631 }
18632 _ = x1.Args[1]
18633 p1 := x1.Args[0]
18634 if mem != x1.Args[1] {
18635 continue
18636 }
18637 y := or_1
18638 if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
18639 continue
18640 }
18641 b = mergePoint(b, x0, x1, y)
18642 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
18643 v.copyOf(v0)
18644 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
18645 v1.AuxInt = int8ToAuxInt(j1)
18646 v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
18647 v2.AuxInt = int8ToAuxInt(8)
18648 v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
18649 v3.AuxInt = int32ToAuxInt(i)
18650 v3.Aux = symToAux(s)
18651 v3.AddArg2(p0, mem)
18652 v2.AddArg(v3)
18653 v1.AddArg(v2)
18654 v0.AddArg2(v1, y)
18655 return true
18656 }
18657 }
18658 break
18659 }
18660
18661
18662
18663 for {
18664 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18665 s0 := v_0
18666 if s0.Op != OpAMD64SHLQconst {
18667 continue
18668 }
18669 j0 := auxIntToInt8(s0.AuxInt)
18670 r0 := s0.Args[0]
18671 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18672 continue
18673 }
18674 x0 := r0.Args[0]
18675 if x0.Op != OpAMD64MOVWload {
18676 continue
18677 }
18678 i0 := auxIntToInt32(x0.AuxInt)
18679 s := auxToSym(x0.Aux)
18680 mem := x0.Args[1]
18681 p := x0.Args[0]
18682 or := v_1
18683 if or.Op != OpAMD64ORQ {
18684 continue
18685 }
18686 _ = or.Args[1]
18687 or_0 := or.Args[0]
18688 or_1 := or.Args[1]
18689 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18690 s1 := or_0
18691 if s1.Op != OpAMD64SHLQconst {
18692 continue
18693 }
18694 j1 := auxIntToInt8(s1.AuxInt)
18695 r1 := s1.Args[0]
18696 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18697 continue
18698 }
18699 x1 := r1.Args[0]
18700 if x1.Op != OpAMD64MOVWload {
18701 continue
18702 }
18703 i1 := auxIntToInt32(x1.AuxInt)
18704 if auxToSym(x1.Aux) != s {
18705 continue
18706 }
18707 _ = x1.Args[1]
18708 if p != x1.Args[0] || mem != x1.Args[1] {
18709 continue
18710 }
18711 y := or_1
18712 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
18713 continue
18714 }
18715 b = mergePoint(b, x0, x1, y)
18716 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
18717 v.copyOf(v0)
18718 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
18719 v1.AuxInt = int8ToAuxInt(j1)
18720 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
18721 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
18722 v3.AuxInt = int32ToAuxInt(i0)
18723 v3.Aux = symToAux(s)
18724 v3.AddArg2(p, mem)
18725 v2.AddArg(v3)
18726 v1.AddArg(v2)
18727 v0.AddArg2(v1, y)
18728 return true
18729 }
18730 }
18731 break
18732 }
18733
18734
18735
18736 for {
18737 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18738 s0 := v_0
18739 if s0.Op != OpAMD64SHLQconst {
18740 continue
18741 }
18742 j0 := auxIntToInt8(s0.AuxInt)
18743 r0 := s0.Args[0]
18744 if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
18745 continue
18746 }
18747 x0 := r0.Args[0]
18748 if x0.Op != OpAMD64MOVWload {
18749 continue
18750 }
18751 i := auxIntToInt32(x0.AuxInt)
18752 s := auxToSym(x0.Aux)
18753 mem := x0.Args[1]
18754 p0 := x0.Args[0]
18755 or := v_1
18756 if or.Op != OpAMD64ORQ {
18757 continue
18758 }
18759 _ = or.Args[1]
18760 or_0 := or.Args[0]
18761 or_1 := or.Args[1]
18762 for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
18763 s1 := or_0
18764 if s1.Op != OpAMD64SHLQconst {
18765 continue
18766 }
18767 j1 := auxIntToInt8(s1.AuxInt)
18768 r1 := s1.Args[0]
18769 if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
18770 continue
18771 }
18772 x1 := r1.Args[0]
18773 if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18774 continue
18775 }
18776 _ = x1.Args[1]
18777 p1 := x1.Args[0]
18778 if mem != x1.Args[1] {
18779 continue
18780 }
18781 y := or_1
18782 if !(j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
18783 continue
18784 }
18785 b = mergePoint(b, x0, x1, y)
18786 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
18787 v.copyOf(v0)
18788 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
18789 v1.AuxInt = int8ToAuxInt(j1)
18790 v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
18791 v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
18792 v3.AuxInt = int32ToAuxInt(i)
18793 v3.Aux = symToAux(s)
18794 v3.AddArg2(p0, mem)
18795 v2.AddArg(v3)
18796 v1.AddArg(v2)
18797 v0.AddArg2(v1, y)
18798 return true
18799 }
18800 }
18801 break
18802 }
18803
18804
18805
18806 for {
18807 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18808 x := v_0
18809 l := v_1
18810 if l.Op != OpAMD64MOVQload {
18811 continue
18812 }
18813 off := auxIntToInt32(l.AuxInt)
18814 sym := auxToSym(l.Aux)
18815 mem := l.Args[1]
18816 ptr := l.Args[0]
18817 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
18818 continue
18819 }
18820 v.reset(OpAMD64ORQload)
18821 v.AuxInt = int32ToAuxInt(off)
18822 v.Aux = symToAux(sym)
18823 v.AddArg3(x, ptr, mem)
18824 return true
18825 }
18826 break
18827 }
18828
18829
18830
18831 for {
18832 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18833 x0 := v_0
18834 if x0.Op != OpAMD64MOVBELload {
18835 continue
18836 }
18837 i0 := auxIntToInt32(x0.AuxInt)
18838 s := auxToSym(x0.Aux)
18839 mem := x0.Args[1]
18840 p := x0.Args[0]
18841 sh := v_1
18842 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
18843 continue
18844 }
18845 x1 := sh.Args[0]
18846 if x1.Op != OpAMD64MOVBELload {
18847 continue
18848 }
18849 i1 := auxIntToInt32(x1.AuxInt)
18850 if auxToSym(x1.Aux) != s {
18851 continue
18852 }
18853 _ = x1.Args[1]
18854 if p != x1.Args[0] || mem != x1.Args[1] || !(i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18855 continue
18856 }
18857 b = mergePoint(b, x0, x1)
18858 v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64)
18859 v.copyOf(v0)
18860 v0.AuxInt = int32ToAuxInt(i1)
18861 v0.Aux = symToAux(s)
18862 v0.AddArg2(p, mem)
18863 return true
18864 }
18865 break
18866 }
18867
18868
18869
18870 for {
18871 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
18872 x0 := v_0
18873 if x0.Op != OpAMD64MOVBELload {
18874 continue
18875 }
18876 i := auxIntToInt32(x0.AuxInt)
18877 s := auxToSym(x0.Aux)
18878 mem := x0.Args[1]
18879 p0 := x0.Args[0]
18880 sh := v_1
18881 if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
18882 continue
18883 }
18884 x1 := sh.Args[0]
18885 if x1.Op != OpAMD64MOVBELload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
18886 continue
18887 }
18888 _ = x1.Args[1]
18889 p1 := x1.Args[0]
18890 if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
18891 continue
18892 }
18893 b = mergePoint(b, x0, x1)
18894 v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64)
18895 v.copyOf(v0)
18896 v0.AuxInt = int32ToAuxInt(i)
18897 v0.Aux = symToAux(s)
18898 v0.AddArg2(p1, mem)
18899 return true
18900 }
18901 break
18902 }
18903 return false
18904 }
// rewriteValueAMD64_OpAMD64ORQconst tries each ORQconst rewrite rule in order,
// mutating v in place and reporting whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTSQconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTSQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (ORQconst [d] x))
	// result: (ORQconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [c] (BTSQconst [d] x))
	// cond: is32Bit(int64(c) | 1<<uint32(d))
	// result: (ORQconst [c | 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTSQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) | 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// result: (MOVQconst [-1])
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) | d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQconstmodify folds address arithmetic
// (ADDQconst / LEAQ bases) into the ORQconstmodify's offset and symbol.
func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQload folds addressing into ORQload and
// forwards a just-stored SSE value through MOVQf2i instead of reloading it.
func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ORQmodify folds address arithmetic
// (ADDQconst / LEAQ bases) into the ORQmodify's offset and symbol.
func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLB canonicalizes byte rotates: a rotate by a
// negated count becomes RORB, and constant counts become ROLBconst masked to 7.
func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLB x (NEGQ y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// result: (ROLBconst [int8(c&7)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// result: (ROLBconst [int8(c&7)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLBconst merges stacked constant byte rotates
// (adding counts mod 8) and removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLBconst [c] (ROLBconst [d] x))
	// result: (ROLBconst [(c+d)& 7] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ROLBconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt((c + d) & 7)
		v.AddArg(x)
		return true
	}
	// match: (ROLBconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLL canonicalizes 32-bit rotates: a rotate by a
// negated count becomes RORL, and constant counts become ROLLconst masked to 31.
func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLL x (NEGQ y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLLconst merges stacked constant 32-bit rotates
// (adding counts mod 32) and removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLLconst [c] (ROLLconst [d] x))
	// result: (ROLLconst [(c+d)&31] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ROLLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt((c + d) & 31)
		v.AddArg(x)
		return true
	}
	// match: (ROLLconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLQ canonicalizes 64-bit rotates: a rotate by a
// negated count becomes RORQ, and constant counts become ROLQconst masked to 63.
func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLQ x (NEGQ y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLQconst merges stacked constant 64-bit rotates
// (adding counts mod 64) and removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLQconst [c] (ROLQconst [d] x))
	// result: (ROLQconst [(c+d)&63] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ROLQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt((c + d) & 63)
		v.AddArg(x)
		return true
	}
	// match: (ROLQconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLW canonicalizes 16-bit rotates: a rotate by a
// negated count becomes RORW, and constant counts become ROLWconst masked to 15.
func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLW x (NEGQ y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64ROLWconst merges stacked constant 16-bit rotates
// (adding counts mod 16) and removes rotates by zero.
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLWconst [c] (ROLWconst [d] x))
	// result: (ROLWconst [(c+d)&15] x)
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ROLWconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt((c + d) & 15)
		v.AddArg(x)
		return true
	}
	// match: (ROLWconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORB rewrites right byte rotates as left rotates:
// a rotate by a negated count becomes ROLB, and constant counts become
// ROLBconst with the negated count masked to 7.
func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORB x (NEGQ y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (NEGL y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// result: (ROLBconst [int8((-c)&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// result: (ROLBconst [int8((-c)&7) ] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORL rewrites 32-bit right rotates as left rotates:
// a rotate by a negated count becomes ROLL, and constant counts become
// ROLLconst with the negated count masked to 31.
func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORL x (NEGQ y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (NEGL y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORQ rewrites 64-bit right rotates as left rotates:
// a rotate by a negated count becomes ROLQ, and constant counts become
// ROLQconst with the negated count masked to 63.
func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORQ x (NEGQ y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64RORW rewrites 16-bit right rotates as left rotates:
// a rotate by a negated count becomes ROLW, and constant counts become
// ROLWconst with the negated count masked to 15.
func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORW x (NEGQ y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (NEGL y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARB folds constant shift counts into SARBconst.
// The count is clamped to 7: shifting a byte right arithmetically by 7 or
// more yields the same result (all sign bits).
func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARB x (MOVQconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARBconst removes shifts by zero and
// constant-folds a shift of a constant operand.
func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARL folds constant counts into SARLconst and
// strips shift-count arithmetic that is a no-op modulo 32 (the hardware
// masks the count to 5 bits, so +c with c&31==0 and &c with c&31==31
// do not change the effective count).
func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARL x (MOVQconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARLconst removes shifts by zero and
// constant-folds a shift of a constant operand (sign-extending from 32 bits).
func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARQ folds constant counts into SARQconst and
// strips shift-count arithmetic that is a no-op modulo 64 (the hardware
// masks the count to 6 bits, so +c with c&63==0 and &c with c&63==63
// do not change the effective count).
func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARQ x (MOVQconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARQconst removes shifts by zero and
// constant-folds a shift of a constant operand.
func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARW folds constant shift counts into SARWconst.
// The count is clamped to 15: shifting a 16-bit value right arithmetically
// by 15 or more yields the same result (all sign bits).
func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARW x (MOVQconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SARWconst removes shifts by zero and
// constant-folds a shift of a constant operand (sign-extending from 16 bits).
func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SBBLcarrymask constant-folds the carry mask when
// the flags operand is a known flag constant: carry set yields -1 (all ones),
// carry clear yields 0.
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBLcarrymask (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQ x (MOVQconst [c]) borrow)
	// cond: is32Bit(c)
	// result: (SBBQconst x [int32(c)] borrow)
	// Fold a constant subtrahend into the immediate form when it fits
	// in a sign-extended 32-bit immediate.
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		borrow := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SBBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(x, borrow)
		return true
	}
	// match: (SBBQ x y (FlagEQ))
	// result: (SUBQborrow x y)
	// With a known-clear carry, subtract-with-borrow is a plain subtract
	// (still producing a borrow-out).
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQborrow)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SBBQcarrymask constant-folds SBBQcarrymask when
// its flags argument is a known flag constant: the 64-bit mask is -1 when
// the (unsigned) carry flag would be set, 0 otherwise.
func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBQcarrymask (FlagEQ))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQconst x [c] (FlagEQ))
	// result: (SUBQconstborrow x [c])
	// With a known-clear carry-in, subtract-with-borrow by a constant is a
	// plain constant subtract (still producing a borrow-out).
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETA simplifies SETA (set if unsigned-above):
// swap direction under inverted flags, and constant-fold against known
// flag values (1 only for the unsigned-greater cases).
func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETAE simplifies SETAE (set if unsigned
// above-or-equal): a self-TEST never sets carry so the result is always
// true; swap direction under inverted flags; constant-fold known flags.
func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETAE (TESTQ x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTL x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTW x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTB x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETAEstore simplifies the fused
// set-flag-and-store form of SETAE: swap direction under inverted flags,
// fold address arithmetic (ADDQconst/LEAQ) into the store offset, and
// turn known flag values into a constant byte store.
func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETAstore simplifies the fused
// set-flag-and-store form of SETA: swap direction under inverted flags,
// fold address arithmetic (ADDQconst/LEAQ) into the store offset, and
// turn known flag values into a constant byte store.
func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETB simplifies SETB (set if unsigned-below):
// a self-TEST never sets carry so the result is always false; a bit-test
// of bit 0 is just masking bit 0; swap direction under inverted flags;
// constant-fold known flags.
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBE simplifies SETBE (set if unsigned
// below-or-equal): swap direction under inverted flags; constant-fold
// known flag values (1 for equal and unsigned-less cases).
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBEstore simplifies the fused
// set-flag-and-store form of SETBE: swap direction under inverted flags,
// fold address arithmetic (ADDQconst/LEAQ) into the store offset, and
// turn known flag values into a constant byte store.
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETBstore simplifies the fused
// set-flag-and-store form of SETB: swap direction under inverted flags,
// fold address arithmetic (ADDQconst/LEAQ) into the store offset, and
// turn known flag values into a constant byte store.
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETEQ simplifies SETEQ (set if equal/zero).
// Single-bit TESTs become bit-test (BT*) forms checked with SETAE
// (BT puts the tested bit in CF, so "bit clear" is "not below"),
// shift-based sign/low-bit isolations collapse to a constant bit test,
// and known flag values constant-fold. Rules are tried in order; the
// inner for/_i0 loops try both operand orders of commutative TESTs.
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	// A 1-bit value equals 1 exactly when it is nonzero.
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1 == z2
	// result: (SETAE (BTQconst [63] x))
	// The shift pair isolates bit 63 of x in place.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1 == z2
	// result: (SETAE (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1 == z2
	// result: (SETAE (BTQconst [0] x))
	// The shift pair isolates bit 0 of x.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1 == z2
	// result: (SETAE (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1 == z2
	// result: (SETAE (BTQconst [63] x))
	// The shift isolates the sign bit of x.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1 == z2
	// result: (SETAE (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	// Equality is symmetric, so inverted flags do not change the answer.
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETEQstore rewrites v, a SETEQstore (store the
// "equal" flag as one byte at ptr+off), into a cheaper equivalent form:
// bit-test instructions for single-bit tests, merged addressing arithmetic,
// or a constant byte store when the flags are statically known.
// It reports whether a rewrite was applied. Rule order is significant:
// the first matching rule wins.
func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // flags-producing value
	v_0 := v.Args[0] // ptr
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTQ is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTQ is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTQ is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTQ is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTQ is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
	// EQ is symmetric, so inverting the flags does not change the result.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
22459 func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
22460 v_0 := v.Args[0]
22461
22462
22463 for {
22464 if v_0.Op != OpAMD64InvertFlags {
22465 break
22466 }
22467 x := v_0.Args[0]
22468 v.reset(OpAMD64SETL)
22469 v.AddArg(x)
22470 return true
22471 }
22472
22473
22474 for {
22475 if v_0.Op != OpAMD64FlagEQ {
22476 break
22477 }
22478 v.reset(OpAMD64MOVLconst)
22479 v.AuxInt = int32ToAuxInt(0)
22480 return true
22481 }
22482
22483
22484 for {
22485 if v_0.Op != OpAMD64FlagLT_ULT {
22486 break
22487 }
22488 v.reset(OpAMD64MOVLconst)
22489 v.AuxInt = int32ToAuxInt(0)
22490 return true
22491 }
22492
22493
22494 for {
22495 if v_0.Op != OpAMD64FlagLT_UGT {
22496 break
22497 }
22498 v.reset(OpAMD64MOVLconst)
22499 v.AuxInt = int32ToAuxInt(0)
22500 return true
22501 }
22502
22503
22504 for {
22505 if v_0.Op != OpAMD64FlagGT_ULT {
22506 break
22507 }
22508 v.reset(OpAMD64MOVLconst)
22509 v.AuxInt = int32ToAuxInt(1)
22510 return true
22511 }
22512
22513
22514 for {
22515 if v_0.Op != OpAMD64FlagGT_UGT {
22516 break
22517 }
22518 v.reset(OpAMD64MOVLconst)
22519 v.AuxInt = int32ToAuxInt(1)
22520 return true
22521 }
22522 return false
22523 }
22524 func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
22525 v_0 := v.Args[0]
22526
22527
22528 for {
22529 if v_0.Op != OpAMD64InvertFlags {
22530 break
22531 }
22532 x := v_0.Args[0]
22533 v.reset(OpAMD64SETLE)
22534 v.AddArg(x)
22535 return true
22536 }
22537
22538
22539 for {
22540 if v_0.Op != OpAMD64FlagEQ {
22541 break
22542 }
22543 v.reset(OpAMD64MOVLconst)
22544 v.AuxInt = int32ToAuxInt(1)
22545 return true
22546 }
22547
22548
22549 for {
22550 if v_0.Op != OpAMD64FlagLT_ULT {
22551 break
22552 }
22553 v.reset(OpAMD64MOVLconst)
22554 v.AuxInt = int32ToAuxInt(0)
22555 return true
22556 }
22557
22558
22559 for {
22560 if v_0.Op != OpAMD64FlagLT_UGT {
22561 break
22562 }
22563 v.reset(OpAMD64MOVLconst)
22564 v.AuxInt = int32ToAuxInt(0)
22565 return true
22566 }
22567
22568
22569 for {
22570 if v_0.Op != OpAMD64FlagGT_ULT {
22571 break
22572 }
22573 v.reset(OpAMD64MOVLconst)
22574 v.AuxInt = int32ToAuxInt(1)
22575 return true
22576 }
22577
22578
22579 for {
22580 if v_0.Op != OpAMD64FlagGT_UGT {
22581 break
22582 }
22583 v.reset(OpAMD64MOVLconst)
22584 v.AuxInt = int32ToAuxInt(1)
22585 return true
22586 }
22587 return false
22588 }
// rewriteValueAMD64_OpAMD64SETGEstore rewrites v, a SETGEstore (store the
// signed greater-or-equal flag as one byte at ptr+off): it flips the
// comparison under InvertFlags, folds address arithmetic into the offset,
// and stores a constant byte when the flags are statically known.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // flags-producing value
	v_0 := v.Args[0] // ptr
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETGstore rewrites v, a SETGstore (store the
// signed greater flag as one byte at ptr+off): it flips the comparison
// under InvertFlags, folds address arithmetic into the offset, and stores
// a constant byte when the flags are statically known.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // flags-producing value
	v_0 := v.Args[0] // ptr
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
22909 func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
22910 v_0 := v.Args[0]
22911
22912
22913 for {
22914 if v_0.Op != OpAMD64InvertFlags {
22915 break
22916 }
22917 x := v_0.Args[0]
22918 v.reset(OpAMD64SETG)
22919 v.AddArg(x)
22920 return true
22921 }
22922
22923
22924 for {
22925 if v_0.Op != OpAMD64FlagEQ {
22926 break
22927 }
22928 v.reset(OpAMD64MOVLconst)
22929 v.AuxInt = int32ToAuxInt(0)
22930 return true
22931 }
22932
22933
22934 for {
22935 if v_0.Op != OpAMD64FlagLT_ULT {
22936 break
22937 }
22938 v.reset(OpAMD64MOVLconst)
22939 v.AuxInt = int32ToAuxInt(1)
22940 return true
22941 }
22942
22943
22944 for {
22945 if v_0.Op != OpAMD64FlagLT_UGT {
22946 break
22947 }
22948 v.reset(OpAMD64MOVLconst)
22949 v.AuxInt = int32ToAuxInt(1)
22950 return true
22951 }
22952
22953
22954 for {
22955 if v_0.Op != OpAMD64FlagGT_ULT {
22956 break
22957 }
22958 v.reset(OpAMD64MOVLconst)
22959 v.AuxInt = int32ToAuxInt(0)
22960 return true
22961 }
22962
22963
22964 for {
22965 if v_0.Op != OpAMD64FlagGT_UGT {
22966 break
22967 }
22968 v.reset(OpAMD64MOVLconst)
22969 v.AuxInt = int32ToAuxInt(0)
22970 return true
22971 }
22972 return false
22973 }
22974 func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
22975 v_0 := v.Args[0]
22976
22977
22978 for {
22979 if v_0.Op != OpAMD64InvertFlags {
22980 break
22981 }
22982 x := v_0.Args[0]
22983 v.reset(OpAMD64SETGE)
22984 v.AddArg(x)
22985 return true
22986 }
22987
22988
22989 for {
22990 if v_0.Op != OpAMD64FlagEQ {
22991 break
22992 }
22993 v.reset(OpAMD64MOVLconst)
22994 v.AuxInt = int32ToAuxInt(1)
22995 return true
22996 }
22997
22998
22999 for {
23000 if v_0.Op != OpAMD64FlagLT_ULT {
23001 break
23002 }
23003 v.reset(OpAMD64MOVLconst)
23004 v.AuxInt = int32ToAuxInt(1)
23005 return true
23006 }
23007
23008
23009 for {
23010 if v_0.Op != OpAMD64FlagLT_UGT {
23011 break
23012 }
23013 v.reset(OpAMD64MOVLconst)
23014 v.AuxInt = int32ToAuxInt(1)
23015 return true
23016 }
23017
23018
23019 for {
23020 if v_0.Op != OpAMD64FlagGT_ULT {
23021 break
23022 }
23023 v.reset(OpAMD64MOVLconst)
23024 v.AuxInt = int32ToAuxInt(0)
23025 return true
23026 }
23027
23028
23029 for {
23030 if v_0.Op != OpAMD64FlagGT_UGT {
23031 break
23032 }
23033 v.reset(OpAMD64MOVLconst)
23034 v.AuxInt = int32ToAuxInt(0)
23035 return true
23036 }
23037 return false
23038 }
// rewriteValueAMD64_OpAMD64SETLEstore rewrites v, a SETLEstore (store the
// signed less-or-equal flag as one byte at ptr+off): it flips the
// comparison under InvertFlags, folds address arithmetic into the offset,
// and stores a constant byte when the flags are statically known.
// It reports whether a rewrite was applied.
func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
	v_2 := v.Args[2] // mem
	v_1 := v.Args[1] // flags-producing value
	v_0 := v.Args[0] // ptr
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETLEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETLstore applies the generated rewrite rules for
// SETLstore (store the "signed less-than" condition flag result as a byte).
// It reports whether any rewrite was applied. Rules are tried in order; the
// first match wins.
func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETGstore [off] {sym} ptr x mem)
	// Inverting the flags turns "less than" into "greater than".
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETLstore [off1+off2] {sym} base val mem)
	// Fold a constant address addition into the store offset.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold a LEAQ address computation into the store's offset and symbol.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	// Known flag value: equal is not "less than", so store 0.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNE applies the generated rewrite rules for
// SETNE (set byte to 1 if the flags say "not equal", else 0). It reports
// whether any rewrite was applied. Rules are tried in order; the first
// match wins. Many rules recognize single-bit tests and convert them to
// BT (bit test) instructions followed by SETB ("set if carry").
func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETNE (TESTBconst [1] x))
	// result: (ANDLconst [1] x)
	// Testing the low bit and setting on NE is just masking the low bit.
	for {
		if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTWconst [1] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETB (BTL x y))
	// A test against (1<<x) is a variable bit test.
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		// TESTL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETB (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETB (BTLconst [int8(log32(c))] x))
	// Testing a power-of-two constant is a fixed bit test.
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETB (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETB (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETEQ (CMPLconst [0] s))
	// s is 0 or 1, so (s != 1) is equivalent to (s == 0).
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETEQ (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	// The shift pair isolates bit 63; test it directly.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [0] x))
	// The shift pair isolates bit 0; test it directly.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETB (BTQconst [63] x))
	// The shift isolates the sign bit; test bit 63 directly.
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETB (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETNE (InvertFlags x))
	// result: (SETNE x)
	// "not equal" is symmetric, so flag inversion is a no-op here.
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (SETNE (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETNE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETNE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SETNEstore applies the generated rewrite rules for
// SETNEstore (store the "not equal" flag result as a byte). It reports
// whether any rewrite was applied. Rules are tried in order; the first match
// wins. These mirror the SETNE rules, producing SETBstore/SETEQstore forms.
func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTL x y) mem)
	// A test against (1<<x) is a variable bit test.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		// TESTL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	// Testing a power-of-two constant is a fixed bit test.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
	// s is 0 or 1, so (s != 1) is equivalent to (s == 0).
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	// The shift pair isolates bit 63; test it directly.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
	// The shift pair isolates bit 0; test it directly.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
	// The shift isolates the sign bit; test bit 63 directly.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETNEstore [off] {sym} ptr x mem)
	// "not equal" is symmetric, so flag inversion is a no-op here.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETNEstore [off1+off2] {sym} base val mem)
	// Fold a constant address addition into the store offset.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	// Fold a LEAQ address computation into the store's offset and symbol.
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	// Known flag value: equal means "not equal" is false, so store 0.
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLL applies the generated rewrite rules for SHLL
// (32-bit shift left by a variable amount). It reports whether any rewrite
// was applied. The hardware masks the shift count to 5 bits, so additions of
// multiples of 32 and AND-masks covering the low 5 bits can be dropped.
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLL x (MOVQconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (MOVLconst [c]))
	// result: (SHLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHLL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	// Adding a multiple of 32 to the count is a no-op mod 32.
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	// Masking with all 5 low count bits set changes nothing.
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHLL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64SHLLconst applies the generated rewrite rules for
// SHLLconst (32-bit shift left by a constant). It reports whether any
// rewrite was applied.
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLLconst [1] (SHRLconst [1] x))
	// result: (BTRLconst [0] x)
	// Shifting right then left by 1 just clears the low bit.
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
	// match: (SHLLconst [0] x)
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLLconst [d] (MOVLconst [c]))
	// result: (MOVLconst [c << uint64(d)])
	// Constant-fold a shift of a constant.
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c << uint64(d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHLQ x (MOVQconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (MOVLconst [c]))
	// result: (SHLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHLQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHLQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHLQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHLQconst [1] (SHRQconst [1] x))
	// result: (BTRQconst [0] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
	// match: (SHLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SHLQconst [d] (MOVQconst [c]))
	// result: (MOVQconst [c << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}
	// match: (SHLQconst [d] (MOVLconst [c]))
	// result: (MOVQconst [int64(c) << uint64(d)])
	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRB x (MOVQconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB x (MOVLconst [c]))
	// cond: c&31 < 8
	// result: (SHRBconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRB _ (MOVQconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRB _ (MOVLconst [c]))
	// cond: c&31 >= 8
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRL x (MOVQconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (MOVLconst [c]))
	// result: (SHRLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SHRL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SHRL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRLconst [1] (SHLLconst [1] x))
	// result: (BTRLconst [31] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRLconst)
		v.AuxInt = int8ToAuxInt(31)
		v.AddArg(x)
		return true
	}
	// match: (SHRLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SHRQ x (MOVQconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (MOVLconst [c]))
	// result: (SHRQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SHRQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SHRQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SHRQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SHRQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRQconst [1] (SHLQconst [1] x))
	// result: (BTRQconst [63] x)
	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64BTRQconst)
		v.AuxInt = int8ToAuxInt(63)
		v.AddArg(x)
		return true
	}
	// match: (SHRQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SHRW x (MOVQconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW x (MOVLconst [c]))
	// cond: c&31 < 16
	// result: (SHRWconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 16) {
			break
		}
		v.reset(OpAMD64SHRWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SHRW _ (MOVQconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SHRW _ (MOVLconst [c]))
	// cond: c&31 >= 16
	// result: (MOVLconst [0])
	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 16) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SHRWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBL x (MOVLconst [c]))
	// result: (SUBLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SUBLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (SUBL (MOVLconst [c]) x)
	// result: (NEGL (SUBLconst <v.Type> x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64NEGL)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBLload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (SUBLconst [c] x)
	// result: (ADDLconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (SUBL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SUBQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (SUBQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64NEGQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SUBQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBQload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQborrow x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (SUBQconstborrow x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SUBQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SUBQconst [c] x)
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	// match: (SUBQconst (MOVQconst [d]) [c])
	// result: (MOVQconst [d-int64(c)])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d - int64(c))
		return true
	}
	// match: (SUBQconst (SUBQconst x [d]) [c])
	// cond: is32Bit(int64(-c)-int64(d))
	// result: (ADDQconst [-c-d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SUBQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(-c) - int64(d))) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c - d)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (SUBQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (SUBSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (SUBSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SUBSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SUBSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (SUBSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64SUBSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTB (MOVLconst [c]) x)
	// result: (TESTBconst [int8(c)] x)
	for {
		// TESTB is commutative; try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTBconst)
			v.AuxInt = int8ToAuxInt(int8(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVBload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTBconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTL applies rewrite rules to an OpAMD64TESTL
// value. It reports whether a rewrite was performed. Rules are tried in order.
func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTL (MOVLconst [c]) x)
	// result: (TESTLconst [c] x)
	for {
		// TESTL is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Build the replacement in the load's block so the load's
			// memory argument is available there.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDLload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTL)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTLconst applies rewrite rules to an
// OpAMD64TESTLconst value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTLconst [c] (MOVLconst [c]))
	// cond: c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTLconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQ applies rewrite rules to an OpAMD64TESTQ
// value. It reports whether a rewrite was performed. Rules are tried in order.
func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (TESTQconst [int32(c)] x)
	for {
		// TESTQ is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			// TESTQconst only takes a 32-bit immediate.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64TESTQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Build the replacement in the load's block so the load's
			// memory argument is available there.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	// match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
	// cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
	// result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			a := v_0
			if a.Op != OpAMD64ANDQload {
				continue
			}
			off := auxIntToInt32(a.AuxInt)
			sym := auxToSym(a.Aux)
			mem := a.Args[2]
			x := a.Args[0]
			ptr := a.Args[1]
			if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
				continue
			}
			v.reset(OpAMD64TESTQ)
			v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
			v0.AuxInt = int32ToAuxInt(off)
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			v.AddArg2(v0, x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTQconst applies rewrite rules to an
// OpAMD64TESTQconst value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c == 0
	// result: (FlagEQ)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c == 0) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c < 0
	// result: (FlagLT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c < 0) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (TESTQconst [c] (MOVQconst [d]))
	// cond: int64(c) == d && c > 0
	// result: (FlagGT_UGT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		if !(int64(c) == d && c > 0) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (TESTQconst [-1] x)
	// cond: x.Op != OpAMD64MOVQconst
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		x := v_0
		if !(x.Op != OpAMD64MOVQconst) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTW applies rewrite rules to an OpAMD64TESTW
// value. It reports whether a rewrite was performed. Rules are tried in order.
func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (TESTW (MOVLconst [c]) x)
	// result: (TESTWconst [int16(c)] x)
	for {
		// TESTW is commutative: try both argument orders.
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			v.reset(OpAMD64TESTWconst)
			v.AuxInt = int16ToAuxInt(int16(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
	// cond: l == l2 && l.Uses == 2 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			l := v_0
			if l.Op != OpAMD64MOVWload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			l2 := v_1
			if !(l == l2 && l.Uses == 2 && clobber(l)) {
				continue
			}
			// Build the replacement in the load's block so the load's
			// memory argument is available there.
			b = l.Block
			v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
			v.copyOf(v0)
			v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
			v0.Aux = symToAux(sym)
			v0.AddArg2(ptr, mem)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64TESTWconst applies rewrite rules to an
// OpAMD64TESTWconst value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (TESTWconst [-1] x)
	// cond: x.Op != OpAMD64MOVLconst
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != -1 {
			break
		}
		x := v_0
		// Skip constants to avoid fighting the constant-folding rules.
		if !(x.Op != OpAMD64MOVLconst) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	return false
}
26568 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
26569 v_2 := v.Args[2]
26570 v_1 := v.Args[1]
26571 v_0 := v.Args[0]
26572
26573
26574
26575 for {
26576 off1 := auxIntToInt32(v.AuxInt)
26577 sym := auxToSym(v.Aux)
26578 val := v_0
26579 if v_1.Op != OpAMD64ADDQconst {
26580 break
26581 }
26582 off2 := auxIntToInt32(v_1.AuxInt)
26583 ptr := v_1.Args[0]
26584 mem := v_2
26585 if !(is32Bit(int64(off1) + int64(off2))) {
26586 break
26587 }
26588 v.reset(OpAMD64XADDLlock)
26589 v.AuxInt = int32ToAuxInt(off1 + off2)
26590 v.Aux = symToAux(sym)
26591 v.AddArg3(val, ptr, mem)
26592 return true
26593 }
26594 return false
26595 }
26596 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
26597 v_2 := v.Args[2]
26598 v_1 := v.Args[1]
26599 v_0 := v.Args[0]
26600
26601
26602
26603 for {
26604 off1 := auxIntToInt32(v.AuxInt)
26605 sym := auxToSym(v.Aux)
26606 val := v_0
26607 if v_1.Op != OpAMD64ADDQconst {
26608 break
26609 }
26610 off2 := auxIntToInt32(v_1.AuxInt)
26611 ptr := v_1.Args[0]
26612 mem := v_2
26613 if !(is32Bit(int64(off1) + int64(off2))) {
26614 break
26615 }
26616 v.reset(OpAMD64XADDQlock)
26617 v.AuxInt = int32ToAuxInt(off1 + off2)
26618 v.Aux = symToAux(sym)
26619 v.AddArg3(val, ptr, mem)
26620 return true
26621 }
26622 return false
26623 }
// rewriteValueAMD64_OpAMD64XCHGL applies rewrite rules to an OpAMD64XCHGL
// value. It reports whether a rewrite was performed. Rules are tried in order.
func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGL [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGL)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XCHGQ applies rewrite rules to an OpAMD64XCHGQ
// value. It reports whether a rewrite was performed. Rules are tried in order.
func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XCHGQ [off1+off2] {sym} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, ptr, mem)
		return true
	}
	// match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
	// result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		ptr := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
			break
		}
		v.reset(OpAMD64XCHGQ)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, ptr, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORL applies rewrite rules to an OpAMD64XORL
// value. It reports whether a rewrite was performed. Rules are tried in
// order; XORL is commutative, so most rules try both argument orders.
func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTCL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORL (MOVLconst [c]) x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCLconst [int8(log32(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_1
			// Small masks are cheaper as an immediate XOR; only use BTC
			// when the bit index is at least 7 (constant >= 128).
			if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTCLconst)
			v.AuxInt = int8ToAuxInt(int8(log32(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x (MOVLconst [c]))
	// result: (XORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64XORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL (SHLLconst [c] x) (SHRLconst [d] x))
	// cond: d == 32-c
	// result: (ROLLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRLconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 32-c) {
				continue
			}
			v.reset(OpAMD64ROLLconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL <t> (SHLLconst [c] x) (SHRWconst [d] x))
	// cond: d == 16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst [c] x)
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRWconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
				continue
			}
			v.reset(OpAMD64ROLWconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL <t> (SHLLconst [c] x) (SHRBconst [d] x))
	// cond: d == 8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst [c] x)
	for {
		t := v.Type
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLLconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRBconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
				continue
			}
			v.reset(OpAMD64ROLBconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORL x x)
	// result: (MOVLconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (XORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORL x (ADDLconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKL x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			// BLSMSK requires BMI1 (GOAMD64 >= v3).
			if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKL)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconst applies rewrite rules to an
// OpAMD64XORLconst value. It reports whether a rewrite was performed.
// Rules are tried in order; the SETxx rules invert a condition by
// xoring its boolean result with 1.
func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORLconst [c] x)
	// cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCLconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCLconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETNE x))
	// result: (SETEQ x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETEQ x))
	// result: (SETNE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETL x))
	// result: (SETGE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETGE x))
	// result: (SETL x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETLE x))
	// result: (SETG x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETG x))
	// result: (SETLE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETB x))
	// result: (SETAE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETAE x))
	// result: (SETB x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETBE x))
	// result: (SETA x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [1] (SETA x))
	// result: (SETBE x)
	for {
		if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (XORLconst [d] x))
	// result: (XORLconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] (BTCLconst [d] x))
	// result: (XORLconst [c ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTCLconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (XORLconst [c] x)
	// cond: c == 0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (XORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c ^ d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLconstmodify applies rewrite rules to an
// OpAMD64XORLconstmodify value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLload applies rewrite rules to an
// OpAMD64XORLload value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (XORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// The load would read back the just-stored float bits; use them
		// directly via a register-to-register move instead.
		v.reset(OpAMD64XORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORLmodify applies rewrite rules to an
// OpAMD64XORLmodify value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQ applies rewrite rules to an OpAMD64XORQ
// value. It reports whether a rewrite was performed. Rules are tried in
// order; XORQ is commutative, so most rules try both argument orders.
func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTCQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTCQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (XORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
	// result: (BTCQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			// Small masks are cheaper as an immediate XOR; only use BTC
			// when the bit index is at least 7 (constant >= 128).
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
				continue
			}
			v.reset(OpAMD64BTCQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (XORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			// XORQconst only takes a 32-bit immediate.
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64XORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ (SHLQconst [c] x) (SHRQconst [d] x))
	// cond: d == 64-c
	// result: (ROLQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQconst {
				continue
			}
			c := auxIntToInt8(v_0.AuxInt)
			x := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQconst {
				continue
			}
			d := auxIntToInt8(v_1.AuxInt)
			if x != v_1.Args[0] || !(d == 64-c) {
				continue
			}
			v.reset(OpAMD64ROLQconst)
			v.AuxInt = int8ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (XORQ x x)
	// result: (MOVQconst [0])
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (XORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64XORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	// match: (XORQ x (ADDQconst [-1] x))
	// cond: buildcfg.GOAMD64 >= 3
	// result: (BLSMSKQ x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			// BLSMSK requires BMI1 (GOAMD64 >= v3).
			if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
				continue
			}
			v.reset(OpAMD64BLSMSKQ)
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQconst applies rewrite rules to an
// OpAMD64XORQconst value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (XORQconst [c] x)
	// cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
	// result: (BTCQconst [int8(log32(c))] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
			break
		}
		v.reset(OpAMD64BTCQconst)
		v.AuxInt = int8ToAuxInt(int8(log32(c)))
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (XORQconst [d] x))
	// result: (XORQconst [c ^ d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64XORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ d)
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [c] (BTCQconst [d] x))
	// cond: is32Bit(int64(c) ^ 1<<uint32(d))
	// result: (XORQconst [c ^ 1<<uint32(d)] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64BTCQconst {
			break
		}
		d := auxIntToInt8(v_0.AuxInt)
		x := v_0.Args[0]
		// The merged constant must still fit in the 32-bit immediate.
		if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
			break
		}
		v.reset(OpAMD64XORQconst)
		v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
		v.AddArg(x)
		return true
	}
	// match: (XORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (XORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c) ^ d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) ^ d)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQconstmodify applies rewrite rules to an
// OpAMD64XORQconstmodify value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQload applies rewrite rules to an
// OpAMD64XORQload value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (XORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		// The load would read back the just-stored float bits; use them
		// directly via a register-to-register move instead.
		v.reset(OpAMD64XORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
// rewriteValueAMD64_OpAMD64XORQmodify applies rewrite rules to an
// OpAMD64XORQmodify value. It reports whether a rewrite was performed.
func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (XORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64XORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
27646 func rewriteValueAMD64_OpAddr(v *Value) bool {
27647 v_0 := v.Args[0]
27648
27649
27650 for {
27651 sym := auxToSym(v.Aux)
27652 base := v_0
27653 v.reset(OpAMD64LEAQ)
27654 v.Aux = symToAux(sym)
27655 v.AddArg(base)
27656 return true
27657 }
27658 }
27659 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
27660 v_2 := v.Args[2]
27661 v_1 := v.Args[1]
27662 v_0 := v.Args[0]
27663 b := v.Block
27664 typ := &b.Func.Config.Types
27665
27666
27667 for {
27668 ptr := v_0
27669 val := v_1
27670 mem := v_2
27671 v.reset(OpAMD64AddTupleFirst32)
27672 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
27673 v0.AddArg3(val, ptr, mem)
27674 v.AddArg2(val, v0)
27675 return true
27676 }
27677 }
func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicAdd64 ptr val mem)
	// result: (AddTupleFirst64 val (XADDQlock val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64AddTupleFirst64)
		v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg2(val, v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicAnd32 ptr val mem)
	// result: (ANDLlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ANDLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicAnd8 ptr val mem)
	// result: (ANDBlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ANDBlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicCompareAndSwap32 ptr old new_ mem)
	// result: (CMPXCHGLlock ptr old new_ mem)
	for {
		ptr := v_0
		old := v_1
		new_ := v_2
		mem := v_3
		v.reset(OpAMD64CMPXCHGLlock)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicCompareAndSwap64 ptr old new_ mem)
	// result: (CMPXCHGQlock ptr old new_ mem)
	for {
		ptr := v_0
		old := v_1
		new_ := v_2
		mem := v_3
		v.reset(OpAMD64CMPXCHGQlock)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicExchange32 ptr val mem)
	// result: (XCHGL val ptr mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64XCHGL)
		v.AddArg3(val, ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicExchange64 ptr val mem)
	// result: (XCHGQ val ptr mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64XCHGQ)
		v.AddArg3(val, ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad32 ptr mem)
	// result: (MOVLatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad64 ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoad8 ptr mem)
	// result: (MOVBatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicLoadPtr ptr mem)
	// result: (MOVQatomicload ptr mem)
	for {
		ptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQatomicload)
		v.AddArg2(ptr, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicOr32 ptr val mem)
	// result: (ORLlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ORLlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (AtomicOr8 ptr val mem)
	// result: (ORBlock ptr val mem)
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpAMD64ORBlock)
		v.AddArg3(ptr, val, mem)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore32 ptr val mem)
	// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore64 ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStore8 ptr val mem)
	// result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (AtomicStorePtrNoWB ptr val mem)
	// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
	for {
		ptr := v_0
		val := v_1
		mem := v_2
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
		v0.AddArg3(val, ptr, mem)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen16 x)
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
	for {
		x := v_0
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen32 x)
	// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
	for {
		x := v_0
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
		v1.AuxInt = int32ToAuxInt(1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v2.AddArg(x)
		v1.AddArg2(v2, v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen64 <t> x)
	// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
	for {
		t := v.Type
		x := v_0
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
		v1 := b.NewValue0(v.Pos, OpSelect0, t)
		v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v2.AddArg(x)
		v1.AddArg(v2)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v3.AuxInt = int64ToAuxInt(-1)
		v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4.AddArg(v2)
		v0.AddArg3(v1, v3, v4)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpBitLen8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (BitLen8 x)
	// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
	for {
		x := v_0
		v.reset(OpAMD64BSRL)
		v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v1.AddArg(x)
		v0.AddArg2(v1, v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCeil(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ceil x)
	// result: (ROUNDSD [2] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(2)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpCondSelect(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is32BitInt(t)
	// result: (CMOVLHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is16BitInt(t)
	// result: (CMOVWHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 1
	// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 2
	// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 4
	// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
	// result: (CMOVLNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
	// result: (CMOVWNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt16(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [c])
	// result: (MOVLconst [b2i32(c)])
	for {
		c := auxIntToBool(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(b2i32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	// match: (ConstNil)
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = int8ToAuxInt(16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = int8ToAuxInt(32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v3.AddArg(v1)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ x))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
		v0.AuxInt = int8ToAuxInt(8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpDiv16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [a] x y)
	// result: (Select0 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (Select0 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32 [a] x y)
	// result: (Select0 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32u x y)
	// result: (Select0 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64 [a] x y)
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64u x y)
	// result: (Select0 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq16 x y)
	// result: (SETEQ (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
29341 func rewriteValueAMD64_OpEq32(v *Value) bool {
29342 v_1 := v.Args[1]
29343 v_0 := v.Args[0]
29344 b := v.Block
29345
29346
29347 for {
29348 x := v_0
29349 y := v_1
29350 v.reset(OpAMD64SETEQ)
29351 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
29352 v0.AddArg2(x, y)
29353 v.AddArg(v0)
29354 return true
29355 }
29356 }
29357 func rewriteValueAMD64_OpEq32F(v *Value) bool {
29358 v_1 := v.Args[1]
29359 v_0 := v.Args[0]
29360 b := v.Block
29361
29362
29363 for {
29364 x := v_0
29365 y := v_1
29366 v.reset(OpAMD64SETEQF)
29367 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
29368 v0.AddArg2(x, y)
29369 v.AddArg(v0)
29370 return true
29371 }
29372 }
29373 func rewriteValueAMD64_OpEq64(v *Value) bool {
29374 v_1 := v.Args[1]
29375 v_0 := v.Args[0]
29376 b := v.Block
29377
29378
29379 for {
29380 x := v_0
29381 y := v_1
29382 v.reset(OpAMD64SETEQ)
29383 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29384 v0.AddArg2(x, y)
29385 v.AddArg(v0)
29386 return true
29387 }
29388 }
29389 func rewriteValueAMD64_OpEq64F(v *Value) bool {
29390 v_1 := v.Args[1]
29391 v_0 := v.Args[0]
29392 b := v.Block
29393
29394
29395 for {
29396 x := v_0
29397 y := v_1
29398 v.reset(OpAMD64SETEQF)
29399 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
29400 v0.AddArg2(x, y)
29401 v.AddArg(v0)
29402 return true
29403 }
29404 }
29405 func rewriteValueAMD64_OpEq8(v *Value) bool {
29406 v_1 := v.Args[1]
29407 v_0 := v.Args[0]
29408 b := v.Block
29409
29410
29411 for {
29412 x := v_0
29413 y := v_1
29414 v.reset(OpAMD64SETEQ)
29415 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
29416 v0.AddArg2(x, y)
29417 v.AddArg(v0)
29418 return true
29419 }
29420 }
29421 func rewriteValueAMD64_OpEqB(v *Value) bool {
29422 v_1 := v.Args[1]
29423 v_0 := v.Args[0]
29424 b := v.Block
29425
29426
29427 for {
29428 x := v_0
29429 y := v_1
29430 v.reset(OpAMD64SETEQ)
29431 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
29432 v0.AddArg2(x, y)
29433 v.AddArg(v0)
29434 return true
29435 }
29436 }
29437 func rewriteValueAMD64_OpEqPtr(v *Value) bool {
29438 v_1 := v.Args[1]
29439 v_0 := v.Args[0]
29440 b := v.Block
29441
29442
29443 for {
29444 x := v_0
29445 y := v_1
29446 v.reset(OpAMD64SETEQ)
29447 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29448 v0.AddArg2(x, y)
29449 v.AddArg(v0)
29450 return true
29451 }
29452 }
func rewriteValueAMD64_OpFMA(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (FMA x y z)
	// result: (VFMADD231SD z x y)
	for {
		x := v_0
		y := v_1
		z := v_2
		v.reset(OpAMD64VFMADD231SD)
		v.AddArg3(z, x, y)
		return true
	}
}
func rewriteValueAMD64_OpFloor(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Floor x)
	// result: (ROUNDSD [1] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpGetG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (GetG mem)
	// cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
	// result: (LoweredGetG mem)
	for {
		mem := v_0
		if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
			break
		}
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (HasCPUFeature {s})
	// result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
	for {
		s := auxToSym(v.Aux)
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
		v1.Aux = symToAux(s)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (SETB (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsNonNil p)
	// result: (SETNE (TESTQ p p))
	for {
		p := v_0
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg2(p, p)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16 x y)
	// result: (SETLE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16U x y)
	// result: (SETBE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32 x y)
	// result: (SETLE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32F x y)
	// result: (SETGEF (UCOMISS y x))
	// note: operands are swapped — x <= y is tested as y >= x on the FP flags.
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32U x y)
	// result: (SETBE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64 x y)
	// result: (SETLE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64F x y)
	// result: (SETGEF (UCOMISD y x))
	// note: operands are swapped — x <= y is tested as y >= x on the FP flags.
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64U x y)
	// result: (SETBE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8 x y)
	// result: (SETLE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8U x y)
	// result: (SETBE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16 x y)
	// result: (SETL (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16U x y)
	// result: (SETB (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32 x y)
	// result: (SETL (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32F x y)
	// result: (SETGF (UCOMISS y x))
	// note: operands are swapped — x < y is tested as y > x on the FP flags.
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32U x y)
	// result: (SETB (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64 x y)
	// result: (SETL (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64F x y)
	// result: (SETGF (UCOMISD y x))
	// note: operands are swapped — x < y is tested as y > x on the FP flags.
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64U x y)
	// result: (SETB (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8 x y)
	// result: (SETL (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8U x y)
	// result: (SETB (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitInt(t)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
	v_0 := v.Args[0]
	// match: (LocalAddr {sym} base)
	// result: (LEAQ {sym} base)
	for {
		sym := auxToSym(v.Aux)
		base := v_0
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
}
func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpMod16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16 [a] x y)
	// result: (Select1 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16u x y)
	// result: (Select1 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32 [a] x y)
	// result: (Select1 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32u x y)
	// result: (Select1 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64 [a] x y)
	// result: (Select1 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64u x y)
	// result: (Select1 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8 x y)
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8u x y)
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpMove lowers the generic Move [size] dst src mem op to
// AMD64 loads/stores. Rules are tried in order; each `for { ... }` loop is one
// rewrite rule that either fires (return true) or breaks to the next rule.
func rewriteValueAMD64_OpMove(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Move [0] _ _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_2
		v.copyOf(mem)
		return true
	}
	// match: (Move [1] dst src mem)
	// result: (MOVBstore dst (MOVBload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [2] dst src mem)
	// result: (MOVWstore dst (MOVWload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [4] dst src mem)
	// result: (MOVLstore dst (MOVLload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [8] dst src mem)
	// result: (MOVQstore dst (MOVQload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVQstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: config.useSSE
	// result: (MOVOstore dst (MOVOload src mem) mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstore)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v0.AddArg2(src, mem)
		v.AddArg3(dst, v0, mem)
		return true
	}
	// match: (Move [16] dst src mem)
	// cond: !config.useSSE
	// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [32] dst src mem)
	// result: (Move [16] (OffPtr <dst.Type> [16] dst) (OffPtr <src.Type> [16] src) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [48] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> [16] dst) (OffPtr <src.Type> [16] src) (Move [16] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(16)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [64] dst src mem)
	// cond: config.useSSE
	// result: (Move [32] (OffPtr <dst.Type> [32] dst) (OffPtr <src.Type> [32] src) (Move [32] dst src mem))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		if !(config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(32)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(32)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(32)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
		v2.AuxInt = int64ToAuxInt(32)
		v2.AddArg3(dst, src, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [3] dst src mem)
	// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
	// Odd sizes are copied with two overlapping-free stores (2+1 bytes).
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(2)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(2)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [5] dst src mem)
	// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [6] dst src mem)
	// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(4)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(4)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [7] dst src mem)
	// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
	// Two overlapping 4-byte copies cover 7 bytes.
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(3)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(3)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [9] dst src mem)
	// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 9 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [10] dst src mem)
	// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 10 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVWstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [12] dst src mem)
	// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
	for {
		if auxIntToInt64(v.AuxInt) != 12 {
			break
		}
		dst := v_0
		src := v_1
		mem := v_2
		v.reset(OpAMD64MOVLstore)
		v.AuxInt = int32ToAuxInt(8)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(8)
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s == 11 || s >= 13 && s <= 15
	// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
	// Two overlapping 8-byte copies cover any size in 11..15.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s == 11 || s >= 13 && s <= 15) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AuxInt = int32ToAuxInt(int32(s - 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(int32(s - 8))
		v0.AddArg2(src, mem)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v2.AddArg2(src, mem)
		v1.AddArg3(dst, v2, mem)
		v.AddArg3(dst, v0, v1)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 <= 8
	// result: (Move [s-s%16] (OffPtr <dst.Type> [s%16] dst) (OffPtr <src.Type> [s%16] src) (MOVQstore dst (MOVQload src mem) mem))
	// Copy the ragged head with one 8-byte move, then recurse on the 16-aligned rest.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 <= 8) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> [s%16] dst) (OffPtr <src.Type> [s%16] src) (MOVOstore dst (MOVOload src mem) mem))
	// Head > 8 bytes: one 16-byte SSE move covers it, then recurse.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
		v3.AddArg2(src, mem)
		v2.AddArg3(dst, v3, mem)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
	// result: (Move [s-s%16] (OffPtr <dst.Type> [s%16] dst) (OffPtr <src.Type> [s%16] src) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
	// No SSE: cover the head with two 8-byte moves, then recurse.
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(dst)
		v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
		v1.AuxInt = int64ToAuxInt(s % 16)
		v1.AddArg(src)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v2.AuxInt = int32ToAuxInt(8)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg2(src, mem)
		v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
		v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v5.AddArg2(src, mem)
		v4.AddArg3(dst, v5, mem)
		v2.AddArg3(dst, v3, v4)
		v.AddArg3(v0, v1, v2)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
	// result: (DUFFCOPY [s] dst src mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64DUFFCOPY)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (Move [s] dst src mem)
	// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
	// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		dst := v_0
		src := v_1
		mem := v_2
		if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
			break
		}
		v.reset(OpAMD64REPMOVSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v.AddArg4(dst, src, v0, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpNeg32F lowers float32 negation.
// match: (Neg32F x)
// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
// XOR with -0.0 flips only the sign bit.
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
		v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
		v.AddArg2(x, v0)
		return true
	}
}
// rewriteValueAMD64_OpNeg64F lowers float64 negation.
// match: (Neg64F x)
// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
// XOR with -0.0 flips only the sign bit.
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	for {
		x := v_0
		v.reset(OpAMD64PXOR)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
		v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
		v.AddArg2(x, v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq16 lowers 16-bit inequality.
// match: (Neq16 x y)
// result: (SETNE (CMPW x y))
func rewriteValueAMD64_OpNeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq32 lowers 32-bit inequality.
// match: (Neq32 x y)
// result: (SETNE (CMPL x y))
func rewriteValueAMD64_OpNeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq32F lowers float32 inequality.
// match: (Neq32F x y)
// result: (SETNEF (UCOMISS x y))
// SETNEF (not plain SETNE) so unordered (NaN) compares count as not-equal.
func rewriteValueAMD64_OpNeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq64 lowers 64-bit inequality.
// match: (Neq64 x y)
// result: (SETNE (CMPQ x y))
func rewriteValueAMD64_OpNeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq64F lowers float64 inequality.
// match: (Neq64F x y)
// result: (SETNEF (UCOMISD x y))
// SETNEF (not plain SETNE) so unordered (NaN) compares count as not-equal.
func rewriteValueAMD64_OpNeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeq8 lowers 8-bit inequality.
// match: (Neq8 x y)
// result: (SETNE (CMPB x y))
func rewriteValueAMD64_OpNeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeqB lowers boolean inequality (bools are bytes).
// match: (NeqB x y)
// result: (SETNE (CMPB x y))
func rewriteValueAMD64_OpNeqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNeqPtr lowers pointer inequality (pointers are 64-bit).
// match: (NeqPtr x y)
// result: (SETNE (CMPQ x y))
func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpNot lowers boolean not.
// match: (Not x)
// result: (XORLconst [1] x)
// Booleans are 0/1, so XOR with 1 flips the value.
func rewriteValueAMD64_OpNot(v *Value) bool {
	v_0 := v.Args[0]
	for {
		x := v_0
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpOffPtr lowers pointer-offset computation.
func rewriteValueAMD64_OpOffPtr(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (OffPtr [off] ptr)
	// cond: is32Bit(off)
	// result: (ADDQconst [int32(off)] ptr)
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		if !(is32Bit(off)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(int32(off))
		v.AddArg(ptr)
		return true
	}
	// match: (OffPtr [off] ptr)
	// result: (ADDQ (MOVQconst [off]) ptr)
	// Offset doesn't fit in 32 bits; materialize it in a register first.
	for {
		off := auxIntToInt64(v.AuxInt)
		ptr := v_0
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(off)
		v.AddArg2(v0, ptr)
		return true
	}
}
// rewriteValueAMD64_OpPanicBounds lowers a bounds-check panic to one of the
// ABI-specific LoweredPanicBounds ops, selected by boundsABI(kind).
func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 0
	// result: (LoweredPanicBoundsA [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 0) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsA)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 1
	// result: (LoweredPanicBoundsB [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 1) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsB)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	// match: (PanicBounds [kind] x y mem)
	// cond: boundsABI(kind) == 2
	// result: (LoweredPanicBoundsC [kind] x y mem)
	for {
		kind := auxIntToInt64(v.AuxInt)
		x := v_0
		y := v_1
		mem := v_2
		if !(boundsABI(kind) == 2) {
			break
		}
		v.reset(OpAMD64LoweredPanicBoundsC)
		v.AuxInt = int64ToAuxInt(kind)
		v.AddArg3(x, y, mem)
		return true
	}
	return false
}
// rewriteValueAMD64_OpPopCount16 lowers 16-bit population count.
// match: (PopCount16 x)
// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
// Zero-extend first so the 32-bit POPCNT sees only the low 16 bits.
func rewriteValueAMD64_OpPopCount16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpPopCount8 lowers 8-bit population count.
// match: (PopCount8 x)
// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
// Zero-extend first so the 32-bit POPCNT sees only the low 8 bits.
func rewriteValueAMD64_OpPopCount8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	for {
		x := v_0
		v.reset(OpAMD64POPCNTL)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
// rewriteValueAMD64_OpRoundToEven lowers round-half-to-even.
// match: (RoundToEven x)
// result: (ROUNDSD [0] x)
// Immediate 0 selects the round-to-nearest-even mode of ROUNDSD.
func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
	v_0 := v.Args[0]
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(0)
		v.AddArg(x)
		return true
	}
}
// rewriteValueAMD64_OpRsh16Ux16 lowers an unsigned 16-bit right shift by a
// 16-bit amount.
func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
	// Go requires the result be 0 when y >= 16; the SBBLcarrymask produces an
	// all-ones/all-zeros mask from the comparison to enforce that.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	// Shift amount proven in range: emit the bare shift, no masking needed.
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16Ux32 lowers an unsigned 16-bit right shift by a
// 32-bit amount.
func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
	// Mask the result to 0 when y >= 16, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16Ux64 lowers an unsigned 16-bit right shift by a
// 64-bit amount.
func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
	// Mask the result to 0 when y >= 16, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16Ux8 lowers an unsigned 16-bit right shift by an
// 8-bit amount.
func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
	// Mask the result to 0 when y >= 16, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(16)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh16Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x16 lowers a signed 16-bit right shift by a
// 16-bit amount.
func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
	// For signed shifts, y >= 16 must behave like shifting by the maximum
	// (sign fill), so the shift amount is saturated via the mask trick rather
	// than the result being zeroed.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x32 lowers a signed 16-bit right shift by a
// 32-bit amount.
func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
	// Saturate the shift amount so y >= 16 sign-fills instead of wrapping.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x64 lowers a signed 16-bit right shift by a
// 64-bit amount.
func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
	// Saturate the (64-bit) shift amount so y >= 16 sign-fills.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh16x8 lowers a signed 16-bit right shift by an
// 8-bit amount.
func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
	// Saturate the shift amount so y >= 16 sign-fills instead of wrapping.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(16)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARW x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARW)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux16 lowers an unsigned 32-bit right shift by a
// 16-bit amount.
func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	// Mask the result to 0 when y >= 32, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux32 lowers an unsigned 32-bit right shift by a
// 32-bit amount.
func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	// Mask the result to 0 when y >= 32, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux64 lowers an unsigned 32-bit right shift by a
// 64-bit amount.
func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	// Mask the result to 0 when y >= 32, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32Ux8 lowers an unsigned 32-bit right shift by an
// 8-bit amount.
func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	// Mask the result to 0 when y >= 32, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh32Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x16 lowers a signed 32-bit right shift by a
// 16-bit amount.
func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
	// Saturate the shift amount so y >= 32 sign-fills instead of wrapping.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x32 lowers a signed 32-bit right shift by a
// 32-bit amount.
func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
	// Saturate the shift amount so y >= 32 sign-fills instead of wrapping.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x64 lowers a signed 32-bit right shift by a
// 64-bit amount.
func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
	// Saturate the (64-bit) shift amount so y >= 32 sign-fills.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh32x8 lowers a signed 32-bit right shift by an
// 8-bit amount.
func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
	// Saturate the shift amount so y >= 32 sign-fills instead of wrapping.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(32)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux16 lowers an unsigned 64-bit right shift by a
// 16-bit amount.
func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	// Mask the result to 0 when y >= 64, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux32 lowers an unsigned 64-bit right shift by a
// 32-bit amount.
func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	// Mask the result to 0 when y >= 64, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
// rewriteValueAMD64_OpRsh64Ux64 lowers an unsigned 64-bit right shift by a
// 64-bit amount.
func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	// Mask the result to 0 when y >= 64, per Go shift semantics.
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh64Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(64)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8Ux8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(8)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Rsh8Ux8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHRB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHRB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v3.AuxInt = int16ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v3.AuxInt = int32ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Rsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
		v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
		v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v3.AuxInt = int8ToAuxInt(8)
		v3.AddArg(y)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg2(y, v1)
		v.AddArg2(x, v0)
		return true
	}
	// match: (Rsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SARB x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SARB)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect0(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select0 (Mul64uover x y))
	// result: (Select0 <typ.UInt64> (MULQU x y))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Mul32uover x y))
	// result: (Select0 <typ.UInt32> (MULLU x y))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpSelect0)
		v.Type = typ.UInt32
		v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Add64carry x y c))
	// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 (Sub64borrow x y c))
	// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpSelect0)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v2.AddArg(c)
		v1.AddArg(v2)
		v0.AddArg3(x, y, v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst32 val tuple))
	// result: (ADDL val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	// match: (Select0 <t> (AddTupleFirst64 val tuple))
	// result: (ADDQ val (Select0 <t> tuple))
	for {
		t := v.Type
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		val := v_0.Args[0]
		v.reset(OpAMD64ADDQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v0.AddArg(tuple)
		v.AddArg2(val, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelect1(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Select1 (Mul64uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
	for {
		if v_0.Op != OpMul64uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Mul32uover x y))
	// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
	for {
		if v_0.Op != OpMul32uover {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64SETO)
		v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
		v1.AddArg2(x, y)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Add64carry x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpAdd64carry {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (Sub64borrow x y c))
	// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
	for {
		if v_0.Op != OpSub64borrow {
			break
		}
		c := v_0.Args[2]
		x := v_0.Args[0]
		y := v_0.Args[1]
		v.reset(OpAMD64NEGQ)
		v.Type = typ.UInt64
		v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
		v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
		v4.AddArg(c)
		v3.AddArg(v4)
		v2.AddArg3(x, y, v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	// match: (Select1 (NEGLflags (MOVQconst [0])))
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGLflags {
			break
		}
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64SBBQcarrymask {
			break
		}
		x := v_0_0_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (Select1 (AddTupleFirst32 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst32 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	// match: (Select1 (AddTupleFirst64 _ tuple))
	// result: (Select1 tuple)
	for {
		if v_0.Op != OpAMD64AddTupleFirst64 {
			break
		}
		tuple := v_0.Args[1]
		v.reset(OpSelect1)
		v.AddArg(tuple)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSelectN(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst [sc] _ s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
	// cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
	// result: (Move [sc.Val64()] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
			break
		}
		sym := auxToCall(call.Aux)
		s1 := call.Args[0]
		if s1.Op != OpAMD64MOVQstoreconst {
			break
		}
		sc := auxIntToValAndOff(s1.AuxInt)
		_ = s1.Args[1]
		s2 := s1.Args[1]
		if s2.Op != OpAMD64MOVQstore {
			break
		}
		_ = s2.Args[2]
		src := s2.Args[1]
		s3 := s2.Args[2]
		if s3.Op != OpAMD64MOVQstore {
			break
		}
		mem := s3.Args[2]
		dst := s3.Args[1]
		if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sc.Val64())
		v.AddArg3(dst, src, mem)
		return true
	}
	// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
	// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
	// result: (Move [sz] dst src mem)
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		call := v_0
		if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
			break
		}
		sym := auxToCall(call.Aux)
		mem := call.Args[3]
		dst := call.Args[0]
		src := call.Args[1]
		call_2 := call.Args[2]
		if call_2.Op != OpAMD64MOVQconst {
			break
		}
		sz := auxIntToInt64(call_2.AuxInt)
		if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
			break
		}
		v.reset(OpMove)
		v.AuxInt = int64ToAuxInt(sz)
		v.AddArg3(dst, src, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpSlicemask(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (Slicemask <t> x)
	// result: (SARQconst (NEGQ <t> x) [63])
	for {
		t := v.Type
		x := v_0
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(63)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreIndex <t> x y)
	// result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQCC)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SpectreSliceIndex <t> x y)
	// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64CMOVQHI)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v1.AddArg2(x, y)
		v.AddArg3(x, v0, v1)
		return true
	}
}
func rewriteValueAMD64_OpStore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8 && is64BitFloat(val.Type)
	// result: (MOVSDstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8 && is64BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSDstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4 && is32BitFloat(val.Type)
	// result: (MOVSSstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4 && is32BitFloat(val.Type)) {
			break
		}
		v.reset(OpAMD64MOVSSstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 8
	// result: (MOVQstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 8) {
			break
		}
		v.reset(OpAMD64MOVQstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 4
	// result: (MOVLstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 4) {
			break
		}
		v.reset(OpAMD64MOVLstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 2
	// result: (MOVWstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 2) {
			break
		}
		v.reset(OpAMD64MOVWstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	// match: (Store {t} ptr val mem)
	// cond: t.Size() == 1
	// result: (MOVBstore ptr val mem)
	for {
		t := auxToType(v.Aux)
		ptr := v_0
		val := v_1
		mem := v_2
		if !(t.Size() == 1) {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AddArg3(ptr, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpTrunc(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Trunc x)
	// result: (ROUNDSD [3] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(3)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpZero(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (Zero [0] _ mem)
	// result: mem
	for {
		if auxIntToInt64(v.AuxInt) != 0 {
			break
		}
		mem := v_1
		v.copyOf(mem)
		return true
	}
	// match: (Zero [1] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 1 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [2] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 2 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [4] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 4 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [8] destptr mem)
	// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 8 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [3] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 3 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [5] destptr mem)
	// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 5 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVBstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [6] destptr mem)
	// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 6 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVWstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [7] destptr mem)
	// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 7 {
			break
		}
		destptr := v_0
		mem := v_1
		v.reset(OpAMD64MOVLstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%8 != 0 && s > 8 && !config.useSSE
	// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%8 != 0 && s > 8 && !config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%8)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 8)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [24] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 24 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: !config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(!config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 8 && s < 16 && config.useSSE
	// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 8 && s < 16 && config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVQstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
	// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
			break
		}
		v.reset(OpZero)
		v.AuxInt = int64ToAuxInt(s - s%16)
		v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
		v0.AuxInt = int64ToAuxInt(s % 16)
		v0.AddArg(destptr)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Zero [16] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
	for {
		if auxIntToInt64(v.AuxInt) != 16 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [32] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
	for {
		if auxIntToInt64(v.AuxInt) != 32 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v0.AddArg2(destptr, mem)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [48] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
	for {
		if auxIntToInt64(v.AuxInt) != 48 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v1.AddArg2(destptr, mem)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [64] destptr mem)
	// cond: config.useSSE
	// result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
	for {
		if auxIntToInt64(v.AuxInt) != 64 {
			break
		}
		destptr := v_0
		mem := v_1
		if !(config.useSSE) {
			break
		}
		v.reset(OpAMD64MOVOstoreconst)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
		v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
		v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
		v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
		v2.AddArg2(destptr, mem)
		v1.AddArg2(destptr, v2)
		v0.AddArg2(destptr, v1)
		v.AddArg2(destptr, v0)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
	// result: (DUFFZERO [s] destptr mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
			break
		}
		v.reset(OpAMD64DUFFZERO)
		v.AuxInt = int64ToAuxInt(s)
		v.AddArg2(destptr, mem)
		return true
	}
	// match: (Zero [s] destptr mem)
	// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
	// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
	for {
		s := auxIntToInt64(v.AuxInt)
		destptr := v_0
		mem := v_1
		if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
			break
		}
		v.reset(OpAMD64REPSTOSQ)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v0.AuxInt = int64ToAuxInt(s / 8)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
		v1.AuxInt = int64ToAuxInt(0)
		v.AddArg4(destptr, v0, v1, mem)
		return true
	}
	return false
}
33764 func rewriteBlockAMD64(b *Block) bool {
33765 switch b.Kind {
33766 case BlockAMD64EQ:
33767
33768
33769 for b.Controls[0].Op == OpAMD64TESTL {
33770 v_0 := b.Controls[0]
33771 _ = v_0.Args[1]
33772 v_0_0 := v_0.Args[0]
33773 v_0_1 := v_0.Args[1]
33774 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33775 if v_0_0.Op != OpAMD64SHLL {
33776 continue
33777 }
33778 x := v_0_0.Args[1]
33779 v_0_0_0 := v_0_0.Args[0]
33780 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
33781 continue
33782 }
33783 y := v_0_1
33784 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
33785 v0.AddArg2(x, y)
33786 b.resetWithControl(BlockAMD64UGE, v0)
33787 return true
33788 }
33789 break
33790 }
33791
33792
33793 for b.Controls[0].Op == OpAMD64TESTQ {
33794 v_0 := b.Controls[0]
33795 _ = v_0.Args[1]
33796 v_0_0 := v_0.Args[0]
33797 v_0_1 := v_0.Args[1]
33798 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33799 if v_0_0.Op != OpAMD64SHLQ {
33800 continue
33801 }
33802 x := v_0_0.Args[1]
33803 v_0_0_0 := v_0_0.Args[0]
33804 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
33805 continue
33806 }
33807 y := v_0_1
33808 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
33809 v0.AddArg2(x, y)
33810 b.resetWithControl(BlockAMD64UGE, v0)
33811 return true
33812 }
33813 break
33814 }
33815
33816
33817
33818 for b.Controls[0].Op == OpAMD64TESTLconst {
33819 v_0 := b.Controls[0]
33820 c := auxIntToInt32(v_0.AuxInt)
33821 x := v_0.Args[0]
33822 if !(isUint32PowerOfTwo(int64(c))) {
33823 break
33824 }
33825 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
33826 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
33827 v0.AddArg(x)
33828 b.resetWithControl(BlockAMD64UGE, v0)
33829 return true
33830 }
33831
33832
33833
33834 for b.Controls[0].Op == OpAMD64TESTQconst {
33835 v_0 := b.Controls[0]
33836 c := auxIntToInt32(v_0.AuxInt)
33837 x := v_0.Args[0]
33838 if !(isUint64PowerOfTwo(int64(c))) {
33839 break
33840 }
33841 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
33842 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
33843 v0.AddArg(x)
33844 b.resetWithControl(BlockAMD64UGE, v0)
33845 return true
33846 }
33847
33848
33849
33850 for b.Controls[0].Op == OpAMD64TESTQ {
33851 v_0 := b.Controls[0]
33852 _ = v_0.Args[1]
33853 v_0_0 := v_0.Args[0]
33854 v_0_1 := v_0.Args[1]
33855 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33856 if v_0_0.Op != OpAMD64MOVQconst {
33857 continue
33858 }
33859 c := auxIntToInt64(v_0_0.AuxInt)
33860 x := v_0_1
33861 if !(isUint64PowerOfTwo(c)) {
33862 continue
33863 }
33864 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
33865 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
33866 v0.AddArg(x)
33867 b.resetWithControl(BlockAMD64UGE, v0)
33868 return true
33869 }
33870 break
33871 }
33872
33873
33874
33875 for b.Controls[0].Op == OpAMD64TESTQ {
33876 v_0 := b.Controls[0]
33877 _ = v_0.Args[1]
33878 v_0_0 := v_0.Args[0]
33879 v_0_1 := v_0.Args[1]
33880 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33881 z1 := v_0_0
33882 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
33883 continue
33884 }
33885 z1_0 := z1.Args[0]
33886 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
33887 continue
33888 }
33889 x := z1_0.Args[0]
33890 z2 := v_0_1
33891 if !(z1 == z2) {
33892 continue
33893 }
33894 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
33895 v0.AuxInt = int8ToAuxInt(63)
33896 v0.AddArg(x)
33897 b.resetWithControl(BlockAMD64UGE, v0)
33898 return true
33899 }
33900 break
33901 }
33902
33903
33904
33905 for b.Controls[0].Op == OpAMD64TESTL {
33906 v_0 := b.Controls[0]
33907 _ = v_0.Args[1]
33908 v_0_0 := v_0.Args[0]
33909 v_0_1 := v_0.Args[1]
33910 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33911 z1 := v_0_0
33912 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
33913 continue
33914 }
33915 z1_0 := z1.Args[0]
33916 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
33917 continue
33918 }
33919 x := z1_0.Args[0]
33920 z2 := v_0_1
33921 if !(z1 == z2) {
33922 continue
33923 }
33924 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
33925 v0.AuxInt = int8ToAuxInt(31)
33926 v0.AddArg(x)
33927 b.resetWithControl(BlockAMD64UGE, v0)
33928 return true
33929 }
33930 break
33931 }
33932
33933
33934
33935 for b.Controls[0].Op == OpAMD64TESTQ {
33936 v_0 := b.Controls[0]
33937 _ = v_0.Args[1]
33938 v_0_0 := v_0.Args[0]
33939 v_0_1 := v_0.Args[1]
33940 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33941 z1 := v_0_0
33942 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
33943 continue
33944 }
33945 z1_0 := z1.Args[0]
33946 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
33947 continue
33948 }
33949 x := z1_0.Args[0]
33950 z2 := v_0_1
33951 if !(z1 == z2) {
33952 continue
33953 }
33954 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
33955 v0.AuxInt = int8ToAuxInt(0)
33956 v0.AddArg(x)
33957 b.resetWithControl(BlockAMD64UGE, v0)
33958 return true
33959 }
33960 break
33961 }
33962
33963
33964
33965 for b.Controls[0].Op == OpAMD64TESTL {
33966 v_0 := b.Controls[0]
33967 _ = v_0.Args[1]
33968 v_0_0 := v_0.Args[0]
33969 v_0_1 := v_0.Args[1]
33970 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
33971 z1 := v_0_0
33972 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
33973 continue
33974 }
33975 z1_0 := z1.Args[0]
33976 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
33977 continue
33978 }
33979 x := z1_0.Args[0]
33980 z2 := v_0_1
33981 if !(z1 == z2) {
33982 continue
33983 }
33984 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
33985 v0.AuxInt = int8ToAuxInt(0)
33986 v0.AddArg(x)
33987 b.resetWithControl(BlockAMD64UGE, v0)
33988 return true
33989 }
33990 break
33991 }
33992
33993
33994
33995 for b.Controls[0].Op == OpAMD64TESTQ {
33996 v_0 := b.Controls[0]
33997 _ = v_0.Args[1]
33998 v_0_0 := v_0.Args[0]
33999 v_0_1 := v_0.Args[1]
34000 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34001 z1 := v_0_0
34002 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
34003 continue
34004 }
34005 x := z1.Args[0]
34006 z2 := v_0_1
34007 if !(z1 == z2) {
34008 continue
34009 }
34010 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34011 v0.AuxInt = int8ToAuxInt(63)
34012 v0.AddArg(x)
34013 b.resetWithControl(BlockAMD64UGE, v0)
34014 return true
34015 }
34016 break
34017 }
34018
34019
34020
34021 for b.Controls[0].Op == OpAMD64TESTL {
34022 v_0 := b.Controls[0]
34023 _ = v_0.Args[1]
34024 v_0_0 := v_0.Args[0]
34025 v_0_1 := v_0.Args[1]
34026 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34027 z1 := v_0_0
34028 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
34029 continue
34030 }
34031 x := z1.Args[0]
34032 z2 := v_0_1
34033 if !(z1 == z2) {
34034 continue
34035 }
34036 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34037 v0.AuxInt = int8ToAuxInt(31)
34038 v0.AddArg(x)
34039 b.resetWithControl(BlockAMD64UGE, v0)
34040 return true
34041 }
34042 break
34043 }
34044
34045
34046 for b.Controls[0].Op == OpAMD64InvertFlags {
34047 v_0 := b.Controls[0]
34048 cmp := v_0.Args[0]
34049 b.resetWithControl(BlockAMD64EQ, cmp)
34050 return true
34051 }
34052
34053
34054 for b.Controls[0].Op == OpAMD64FlagEQ {
34055 b.Reset(BlockFirst)
34056 return true
34057 }
34058
34059
34060 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34061 b.Reset(BlockFirst)
34062 b.swapSuccessors()
34063 return true
34064 }
34065
34066
34067 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34068 b.Reset(BlockFirst)
34069 b.swapSuccessors()
34070 return true
34071 }
34072
34073
34074 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34075 b.Reset(BlockFirst)
34076 b.swapSuccessors()
34077 return true
34078 }
34079
34080
34081 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
34082 b.Reset(BlockFirst)
34083 b.swapSuccessors()
34084 return true
34085 }
34086 case BlockAMD64GE:
34087
34088
34089 for b.Controls[0].Op == OpAMD64InvertFlags {
34090 v_0 := b.Controls[0]
34091 cmp := v_0.Args[0]
34092 b.resetWithControl(BlockAMD64LE, cmp)
34093 return true
34094 }
34095
34096
34097 for b.Controls[0].Op == OpAMD64FlagEQ {
34098 b.Reset(BlockFirst)
34099 return true
34100 }
34101
34102
34103 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34104 b.Reset(BlockFirst)
34105 b.swapSuccessors()
34106 return true
34107 }
34108
34109
34110 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34111 b.Reset(BlockFirst)
34112 b.swapSuccessors()
34113 return true
34114 }
34115
34116
34117 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34118 b.Reset(BlockFirst)
34119 return true
34120 }
34121
34122
34123 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
34124 b.Reset(BlockFirst)
34125 return true
34126 }
34127 case BlockAMD64GT:
34128
34129
34130 for b.Controls[0].Op == OpAMD64InvertFlags {
34131 v_0 := b.Controls[0]
34132 cmp := v_0.Args[0]
34133 b.resetWithControl(BlockAMD64LT, cmp)
34134 return true
34135 }
34136
34137
34138 for b.Controls[0].Op == OpAMD64FlagEQ {
34139 b.Reset(BlockFirst)
34140 b.swapSuccessors()
34141 return true
34142 }
34143
34144
34145 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34146 b.Reset(BlockFirst)
34147 b.swapSuccessors()
34148 return true
34149 }
34150
34151
34152 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34153 b.Reset(BlockFirst)
34154 b.swapSuccessors()
34155 return true
34156 }
34157
34158
34159 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34160 b.Reset(BlockFirst)
34161 return true
34162 }
34163
34164
34165 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
34166 b.Reset(BlockFirst)
34167 return true
34168 }
34169 case BlockIf:
34170
34171
34172 for b.Controls[0].Op == OpAMD64SETL {
34173 v_0 := b.Controls[0]
34174 cmp := v_0.Args[0]
34175 b.resetWithControl(BlockAMD64LT, cmp)
34176 return true
34177 }
34178
34179
34180 for b.Controls[0].Op == OpAMD64SETLE {
34181 v_0 := b.Controls[0]
34182 cmp := v_0.Args[0]
34183 b.resetWithControl(BlockAMD64LE, cmp)
34184 return true
34185 }
34186
34187
34188 for b.Controls[0].Op == OpAMD64SETG {
34189 v_0 := b.Controls[0]
34190 cmp := v_0.Args[0]
34191 b.resetWithControl(BlockAMD64GT, cmp)
34192 return true
34193 }
34194
34195
34196 for b.Controls[0].Op == OpAMD64SETGE {
34197 v_0 := b.Controls[0]
34198 cmp := v_0.Args[0]
34199 b.resetWithControl(BlockAMD64GE, cmp)
34200 return true
34201 }
34202
34203
34204 for b.Controls[0].Op == OpAMD64SETEQ {
34205 v_0 := b.Controls[0]
34206 cmp := v_0.Args[0]
34207 b.resetWithControl(BlockAMD64EQ, cmp)
34208 return true
34209 }
34210
34211
34212 for b.Controls[0].Op == OpAMD64SETNE {
34213 v_0 := b.Controls[0]
34214 cmp := v_0.Args[0]
34215 b.resetWithControl(BlockAMD64NE, cmp)
34216 return true
34217 }
34218
34219
34220 for b.Controls[0].Op == OpAMD64SETB {
34221 v_0 := b.Controls[0]
34222 cmp := v_0.Args[0]
34223 b.resetWithControl(BlockAMD64ULT, cmp)
34224 return true
34225 }
34226
34227
34228 for b.Controls[0].Op == OpAMD64SETBE {
34229 v_0 := b.Controls[0]
34230 cmp := v_0.Args[0]
34231 b.resetWithControl(BlockAMD64ULE, cmp)
34232 return true
34233 }
34234
34235
34236 for b.Controls[0].Op == OpAMD64SETA {
34237 v_0 := b.Controls[0]
34238 cmp := v_0.Args[0]
34239 b.resetWithControl(BlockAMD64UGT, cmp)
34240 return true
34241 }
34242
34243
34244 for b.Controls[0].Op == OpAMD64SETAE {
34245 v_0 := b.Controls[0]
34246 cmp := v_0.Args[0]
34247 b.resetWithControl(BlockAMD64UGE, cmp)
34248 return true
34249 }
34250
34251
34252 for b.Controls[0].Op == OpAMD64SETO {
34253 v_0 := b.Controls[0]
34254 cmp := v_0.Args[0]
34255 b.resetWithControl(BlockAMD64OS, cmp)
34256 return true
34257 }
34258
34259
34260 for b.Controls[0].Op == OpAMD64SETGF {
34261 v_0 := b.Controls[0]
34262 cmp := v_0.Args[0]
34263 b.resetWithControl(BlockAMD64UGT, cmp)
34264 return true
34265 }
34266
34267
34268 for b.Controls[0].Op == OpAMD64SETGEF {
34269 v_0 := b.Controls[0]
34270 cmp := v_0.Args[0]
34271 b.resetWithControl(BlockAMD64UGE, cmp)
34272 return true
34273 }
34274
34275
34276 for b.Controls[0].Op == OpAMD64SETEQF {
34277 v_0 := b.Controls[0]
34278 cmp := v_0.Args[0]
34279 b.resetWithControl(BlockAMD64EQF, cmp)
34280 return true
34281 }
34282
34283
34284 for b.Controls[0].Op == OpAMD64SETNEF {
34285 v_0 := b.Controls[0]
34286 cmp := v_0.Args[0]
34287 b.resetWithControl(BlockAMD64NEF, cmp)
34288 return true
34289 }
34290
34291
34292 for {
34293 cond := b.Controls[0]
34294 v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
34295 v0.AddArg2(cond, cond)
34296 b.resetWithControl(BlockAMD64NE, v0)
34297 return true
34298 }
34299 case BlockAMD64LE:
34300
34301
34302 for b.Controls[0].Op == OpAMD64InvertFlags {
34303 v_0 := b.Controls[0]
34304 cmp := v_0.Args[0]
34305 b.resetWithControl(BlockAMD64GE, cmp)
34306 return true
34307 }
34308
34309
34310 for b.Controls[0].Op == OpAMD64FlagEQ {
34311 b.Reset(BlockFirst)
34312 return true
34313 }
34314
34315
34316 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34317 b.Reset(BlockFirst)
34318 return true
34319 }
34320
34321
34322 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34323 b.Reset(BlockFirst)
34324 return true
34325 }
34326
34327
34328 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34329 b.Reset(BlockFirst)
34330 b.swapSuccessors()
34331 return true
34332 }
34333
34334
34335 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
34336 b.Reset(BlockFirst)
34337 b.swapSuccessors()
34338 return true
34339 }
34340 case BlockAMD64LT:
34341
34342
34343 for b.Controls[0].Op == OpAMD64InvertFlags {
34344 v_0 := b.Controls[0]
34345 cmp := v_0.Args[0]
34346 b.resetWithControl(BlockAMD64GT, cmp)
34347 return true
34348 }
34349
34350
34351 for b.Controls[0].Op == OpAMD64FlagEQ {
34352 b.Reset(BlockFirst)
34353 b.swapSuccessors()
34354 return true
34355 }
34356
34357
34358 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34359 b.Reset(BlockFirst)
34360 return true
34361 }
34362
34363
34364 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34365 b.Reset(BlockFirst)
34366 return true
34367 }
34368
34369
34370 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34371 b.Reset(BlockFirst)
34372 b.swapSuccessors()
34373 return true
34374 }
34375
34376
34377 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
34378 b.Reset(BlockFirst)
34379 b.swapSuccessors()
34380 return true
34381 }
34382 case BlockAMD64NE:
34383
34384
34385 for b.Controls[0].Op == OpAMD64TESTB {
34386 v_0 := b.Controls[0]
34387 _ = v_0.Args[1]
34388 v_0_0 := v_0.Args[0]
34389 if v_0_0.Op != OpAMD64SETL {
34390 break
34391 }
34392 cmp := v_0_0.Args[0]
34393 v_0_1 := v_0.Args[1]
34394 if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
34395 break
34396 }
34397 b.resetWithControl(BlockAMD64LT, cmp)
34398 return true
34399 }
34400
34401
34402 for b.Controls[0].Op == OpAMD64TESTB {
34403 v_0 := b.Controls[0]
34404 _ = v_0.Args[1]
34405 v_0_0 := v_0.Args[0]
34406 if v_0_0.Op != OpAMD64SETLE {
34407 break
34408 }
34409 cmp := v_0_0.Args[0]
34410 v_0_1 := v_0.Args[1]
34411 if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
34412 break
34413 }
34414 b.resetWithControl(BlockAMD64LE, cmp)
34415 return true
34416 }
34417
34418
34419 for b.Controls[0].Op == OpAMD64TESTB {
34420 v_0 := b.Controls[0]
34421 _ = v_0.Args[1]
34422 v_0_0 := v_0.Args[0]
34423 if v_0_0.Op != OpAMD64SETG {
34424 break
34425 }
34426 cmp := v_0_0.Args[0]
34427 v_0_1 := v_0.Args[1]
34428 if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
34429 break
34430 }
34431 b.resetWithControl(BlockAMD64GT, cmp)
34432 return true
34433 }
34434
34435
34436 for b.Controls[0].Op == OpAMD64TESTB {
34437 v_0 := b.Controls[0]
34438 _ = v_0.Args[1]
34439 v_0_0 := v_0.Args[0]
34440 if v_0_0.Op != OpAMD64SETGE {
34441 break
34442 }
34443 cmp := v_0_0.Args[0]
34444 v_0_1 := v_0.Args[1]
34445 if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
34446 break
34447 }
34448 b.resetWithControl(BlockAMD64GE, cmp)
34449 return true
34450 }
34451
34452
34453 for b.Controls[0].Op == OpAMD64TESTB {
34454 v_0 := b.Controls[0]
34455 _ = v_0.Args[1]
34456 v_0_0 := v_0.Args[0]
34457 if v_0_0.Op != OpAMD64SETEQ {
34458 break
34459 }
34460 cmp := v_0_0.Args[0]
34461 v_0_1 := v_0.Args[1]
34462 if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
34463 break
34464 }
34465 b.resetWithControl(BlockAMD64EQ, cmp)
34466 return true
34467 }
34468
34469
34470 for b.Controls[0].Op == OpAMD64TESTB {
34471 v_0 := b.Controls[0]
34472 _ = v_0.Args[1]
34473 v_0_0 := v_0.Args[0]
34474 if v_0_0.Op != OpAMD64SETNE {
34475 break
34476 }
34477 cmp := v_0_0.Args[0]
34478 v_0_1 := v_0.Args[1]
34479 if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
34480 break
34481 }
34482 b.resetWithControl(BlockAMD64NE, cmp)
34483 return true
34484 }
34485
34486
34487 for b.Controls[0].Op == OpAMD64TESTB {
34488 v_0 := b.Controls[0]
34489 _ = v_0.Args[1]
34490 v_0_0 := v_0.Args[0]
34491 if v_0_0.Op != OpAMD64SETB {
34492 break
34493 }
34494 cmp := v_0_0.Args[0]
34495 v_0_1 := v_0.Args[1]
34496 if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
34497 break
34498 }
34499 b.resetWithControl(BlockAMD64ULT, cmp)
34500 return true
34501 }
34502
34503
34504 for b.Controls[0].Op == OpAMD64TESTB {
34505 v_0 := b.Controls[0]
34506 _ = v_0.Args[1]
34507 v_0_0 := v_0.Args[0]
34508 if v_0_0.Op != OpAMD64SETBE {
34509 break
34510 }
34511 cmp := v_0_0.Args[0]
34512 v_0_1 := v_0.Args[1]
34513 if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
34514 break
34515 }
34516 b.resetWithControl(BlockAMD64ULE, cmp)
34517 return true
34518 }
34519
34520
34521 for b.Controls[0].Op == OpAMD64TESTB {
34522 v_0 := b.Controls[0]
34523 _ = v_0.Args[1]
34524 v_0_0 := v_0.Args[0]
34525 if v_0_0.Op != OpAMD64SETA {
34526 break
34527 }
34528 cmp := v_0_0.Args[0]
34529 v_0_1 := v_0.Args[1]
34530 if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
34531 break
34532 }
34533 b.resetWithControl(BlockAMD64UGT, cmp)
34534 return true
34535 }
34536
34537
34538 for b.Controls[0].Op == OpAMD64TESTB {
34539 v_0 := b.Controls[0]
34540 _ = v_0.Args[1]
34541 v_0_0 := v_0.Args[0]
34542 if v_0_0.Op != OpAMD64SETAE {
34543 break
34544 }
34545 cmp := v_0_0.Args[0]
34546 v_0_1 := v_0.Args[1]
34547 if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
34548 break
34549 }
34550 b.resetWithControl(BlockAMD64UGE, cmp)
34551 return true
34552 }
34553
34554
34555 for b.Controls[0].Op == OpAMD64TESTB {
34556 v_0 := b.Controls[0]
34557 _ = v_0.Args[1]
34558 v_0_0 := v_0.Args[0]
34559 if v_0_0.Op != OpAMD64SETO {
34560 break
34561 }
34562 cmp := v_0_0.Args[0]
34563 v_0_1 := v_0.Args[1]
34564 if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
34565 break
34566 }
34567 b.resetWithControl(BlockAMD64OS, cmp)
34568 return true
34569 }
34570
34571
34572 for b.Controls[0].Op == OpAMD64TESTL {
34573 v_0 := b.Controls[0]
34574 _ = v_0.Args[1]
34575 v_0_0 := v_0.Args[0]
34576 v_0_1 := v_0.Args[1]
34577 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34578 if v_0_0.Op != OpAMD64SHLL {
34579 continue
34580 }
34581 x := v_0_0.Args[1]
34582 v_0_0_0 := v_0_0.Args[0]
34583 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
34584 continue
34585 }
34586 y := v_0_1
34587 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
34588 v0.AddArg2(x, y)
34589 b.resetWithControl(BlockAMD64ULT, v0)
34590 return true
34591 }
34592 break
34593 }
34594
34595
34596 for b.Controls[0].Op == OpAMD64TESTQ {
34597 v_0 := b.Controls[0]
34598 _ = v_0.Args[1]
34599 v_0_0 := v_0.Args[0]
34600 v_0_1 := v_0.Args[1]
34601 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34602 if v_0_0.Op != OpAMD64SHLQ {
34603 continue
34604 }
34605 x := v_0_0.Args[1]
34606 v_0_0_0 := v_0_0.Args[0]
34607 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
34608 continue
34609 }
34610 y := v_0_1
34611 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
34612 v0.AddArg2(x, y)
34613 b.resetWithControl(BlockAMD64ULT, v0)
34614 return true
34615 }
34616 break
34617 }
34618
34619
34620
34621 for b.Controls[0].Op == OpAMD64TESTLconst {
34622 v_0 := b.Controls[0]
34623 c := auxIntToInt32(v_0.AuxInt)
34624 x := v_0.Args[0]
34625 if !(isUint32PowerOfTwo(int64(c))) {
34626 break
34627 }
34628 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34629 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
34630 v0.AddArg(x)
34631 b.resetWithControl(BlockAMD64ULT, v0)
34632 return true
34633 }
34634
34635
34636
34637 for b.Controls[0].Op == OpAMD64TESTQconst {
34638 v_0 := b.Controls[0]
34639 c := auxIntToInt32(v_0.AuxInt)
34640 x := v_0.Args[0]
34641 if !(isUint64PowerOfTwo(int64(c))) {
34642 break
34643 }
34644 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34645 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
34646 v0.AddArg(x)
34647 b.resetWithControl(BlockAMD64ULT, v0)
34648 return true
34649 }
34650
34651
34652
34653 for b.Controls[0].Op == OpAMD64TESTQ {
34654 v_0 := b.Controls[0]
34655 _ = v_0.Args[1]
34656 v_0_0 := v_0.Args[0]
34657 v_0_1 := v_0.Args[1]
34658 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34659 if v_0_0.Op != OpAMD64MOVQconst {
34660 continue
34661 }
34662 c := auxIntToInt64(v_0_0.AuxInt)
34663 x := v_0_1
34664 if !(isUint64PowerOfTwo(c)) {
34665 continue
34666 }
34667 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34668 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
34669 v0.AddArg(x)
34670 b.resetWithControl(BlockAMD64ULT, v0)
34671 return true
34672 }
34673 break
34674 }
34675
34676
34677
34678 for b.Controls[0].Op == OpAMD64TESTQ {
34679 v_0 := b.Controls[0]
34680 _ = v_0.Args[1]
34681 v_0_0 := v_0.Args[0]
34682 v_0_1 := v_0.Args[1]
34683 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34684 z1 := v_0_0
34685 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
34686 continue
34687 }
34688 z1_0 := z1.Args[0]
34689 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
34690 continue
34691 }
34692 x := z1_0.Args[0]
34693 z2 := v_0_1
34694 if !(z1 == z2) {
34695 continue
34696 }
34697 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34698 v0.AuxInt = int8ToAuxInt(63)
34699 v0.AddArg(x)
34700 b.resetWithControl(BlockAMD64ULT, v0)
34701 return true
34702 }
34703 break
34704 }
34705
34706
34707
34708 for b.Controls[0].Op == OpAMD64TESTL {
34709 v_0 := b.Controls[0]
34710 _ = v_0.Args[1]
34711 v_0_0 := v_0.Args[0]
34712 v_0_1 := v_0.Args[1]
34713 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34714 z1 := v_0_0
34715 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
34716 continue
34717 }
34718 z1_0 := z1.Args[0]
34719 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
34720 continue
34721 }
34722 x := z1_0.Args[0]
34723 z2 := v_0_1
34724 if !(z1 == z2) {
34725 continue
34726 }
34727 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34728 v0.AuxInt = int8ToAuxInt(31)
34729 v0.AddArg(x)
34730 b.resetWithControl(BlockAMD64ULT, v0)
34731 return true
34732 }
34733 break
34734 }
34735
34736
34737
34738 for b.Controls[0].Op == OpAMD64TESTQ {
34739 v_0 := b.Controls[0]
34740 _ = v_0.Args[1]
34741 v_0_0 := v_0.Args[0]
34742 v_0_1 := v_0.Args[1]
34743 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34744 z1 := v_0_0
34745 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
34746 continue
34747 }
34748 z1_0 := z1.Args[0]
34749 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
34750 continue
34751 }
34752 x := z1_0.Args[0]
34753 z2 := v_0_1
34754 if !(z1 == z2) {
34755 continue
34756 }
34757 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34758 v0.AuxInt = int8ToAuxInt(0)
34759 v0.AddArg(x)
34760 b.resetWithControl(BlockAMD64ULT, v0)
34761 return true
34762 }
34763 break
34764 }
34765
34766
34767
34768 for b.Controls[0].Op == OpAMD64TESTL {
34769 v_0 := b.Controls[0]
34770 _ = v_0.Args[1]
34771 v_0_0 := v_0.Args[0]
34772 v_0_1 := v_0.Args[1]
34773 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34774 z1 := v_0_0
34775 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
34776 continue
34777 }
34778 z1_0 := z1.Args[0]
34779 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
34780 continue
34781 }
34782 x := z1_0.Args[0]
34783 z2 := v_0_1
34784 if !(z1 == z2) {
34785 continue
34786 }
34787 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34788 v0.AuxInt = int8ToAuxInt(0)
34789 v0.AddArg(x)
34790 b.resetWithControl(BlockAMD64ULT, v0)
34791 return true
34792 }
34793 break
34794 }
34795
34796
34797
34798 for b.Controls[0].Op == OpAMD64TESTQ {
34799 v_0 := b.Controls[0]
34800 _ = v_0.Args[1]
34801 v_0_0 := v_0.Args[0]
34802 v_0_1 := v_0.Args[1]
34803 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34804 z1 := v_0_0
34805 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
34806 continue
34807 }
34808 x := z1.Args[0]
34809 z2 := v_0_1
34810 if !(z1 == z2) {
34811 continue
34812 }
34813 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
34814 v0.AuxInt = int8ToAuxInt(63)
34815 v0.AddArg(x)
34816 b.resetWithControl(BlockAMD64ULT, v0)
34817 return true
34818 }
34819 break
34820 }
34821
34822
34823
34824 for b.Controls[0].Op == OpAMD64TESTL {
34825 v_0 := b.Controls[0]
34826 _ = v_0.Args[1]
34827 v_0_0 := v_0.Args[0]
34828 v_0_1 := v_0.Args[1]
34829 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
34830 z1 := v_0_0
34831 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
34832 continue
34833 }
34834 x := z1.Args[0]
34835 z2 := v_0_1
34836 if !(z1 == z2) {
34837 continue
34838 }
34839 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
34840 v0.AuxInt = int8ToAuxInt(31)
34841 v0.AddArg(x)
34842 b.resetWithControl(BlockAMD64ULT, v0)
34843 return true
34844 }
34845 break
34846 }
34847
34848
34849 for b.Controls[0].Op == OpAMD64TESTB {
34850 v_0 := b.Controls[0]
34851 _ = v_0.Args[1]
34852 v_0_0 := v_0.Args[0]
34853 if v_0_0.Op != OpAMD64SETGF {
34854 break
34855 }
34856 cmp := v_0_0.Args[0]
34857 v_0_1 := v_0.Args[1]
34858 if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
34859 break
34860 }
34861 b.resetWithControl(BlockAMD64UGT, cmp)
34862 return true
34863 }
34864
34865
34866 for b.Controls[0].Op == OpAMD64TESTB {
34867 v_0 := b.Controls[0]
34868 _ = v_0.Args[1]
34869 v_0_0 := v_0.Args[0]
34870 if v_0_0.Op != OpAMD64SETGEF {
34871 break
34872 }
34873 cmp := v_0_0.Args[0]
34874 v_0_1 := v_0.Args[1]
34875 if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
34876 break
34877 }
34878 b.resetWithControl(BlockAMD64UGE, cmp)
34879 return true
34880 }
34881
34882
34883 for b.Controls[0].Op == OpAMD64TESTB {
34884 v_0 := b.Controls[0]
34885 _ = v_0.Args[1]
34886 v_0_0 := v_0.Args[0]
34887 if v_0_0.Op != OpAMD64SETEQF {
34888 break
34889 }
34890 cmp := v_0_0.Args[0]
34891 v_0_1 := v_0.Args[1]
34892 if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
34893 break
34894 }
34895 b.resetWithControl(BlockAMD64EQF, cmp)
34896 return true
34897 }
34898
34899
34900 for b.Controls[0].Op == OpAMD64TESTB {
34901 v_0 := b.Controls[0]
34902 _ = v_0.Args[1]
34903 v_0_0 := v_0.Args[0]
34904 if v_0_0.Op != OpAMD64SETNEF {
34905 break
34906 }
34907 cmp := v_0_0.Args[0]
34908 v_0_1 := v_0.Args[1]
34909 if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
34910 break
34911 }
34912 b.resetWithControl(BlockAMD64NEF, cmp)
34913 return true
34914 }
34915
34916
34917 for b.Controls[0].Op == OpAMD64InvertFlags {
34918 v_0 := b.Controls[0]
34919 cmp := v_0.Args[0]
34920 b.resetWithControl(BlockAMD64NE, cmp)
34921 return true
34922 }
34923
34924
34925 for b.Controls[0].Op == OpAMD64FlagEQ {
34926 b.Reset(BlockFirst)
34927 b.swapSuccessors()
34928 return true
34929 }
34930
34931
34932 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
34933 b.Reset(BlockFirst)
34934 return true
34935 }
34936
34937
34938 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
34939 b.Reset(BlockFirst)
34940 return true
34941 }
34942
34943
34944 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
34945 b.Reset(BlockFirst)
34946 return true
34947 }
34948
34949
34950 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
34951 b.Reset(BlockFirst)
34952 return true
34953 }
34954 case BlockAMD64UGE:
34955
34956
34957 for b.Controls[0].Op == OpAMD64TESTQ {
34958 v_0 := b.Controls[0]
34959 x := v_0.Args[1]
34960 if x != v_0.Args[0] {
34961 break
34962 }
34963 b.Reset(BlockFirst)
34964 return true
34965 }
34966
34967
34968 for b.Controls[0].Op == OpAMD64TESTL {
34969 v_0 := b.Controls[0]
34970 x := v_0.Args[1]
34971 if x != v_0.Args[0] {
34972 break
34973 }
34974 b.Reset(BlockFirst)
34975 return true
34976 }
34977
34978
34979 for b.Controls[0].Op == OpAMD64TESTW {
34980 v_0 := b.Controls[0]
34981 x := v_0.Args[1]
34982 if x != v_0.Args[0] {
34983 break
34984 }
34985 b.Reset(BlockFirst)
34986 return true
34987 }
34988
34989
34990 for b.Controls[0].Op == OpAMD64TESTB {
34991 v_0 := b.Controls[0]
34992 x := v_0.Args[1]
34993 if x != v_0.Args[0] {
34994 break
34995 }
34996 b.Reset(BlockFirst)
34997 return true
34998 }
34999
35000
35001 for b.Controls[0].Op == OpAMD64InvertFlags {
35002 v_0 := b.Controls[0]
35003 cmp := v_0.Args[0]
35004 b.resetWithControl(BlockAMD64ULE, cmp)
35005 return true
35006 }
35007
35008
35009 for b.Controls[0].Op == OpAMD64FlagEQ {
35010 b.Reset(BlockFirst)
35011 return true
35012 }
35013
35014
35015 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35016 b.Reset(BlockFirst)
35017 b.swapSuccessors()
35018 return true
35019 }
35020
35021
35022 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35023 b.Reset(BlockFirst)
35024 return true
35025 }
35026
35027
35028 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35029 b.Reset(BlockFirst)
35030 b.swapSuccessors()
35031 return true
35032 }
35033
35034
35035 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35036 b.Reset(BlockFirst)
35037 return true
35038 }
35039 case BlockAMD64UGT:
35040
35041
35042 for b.Controls[0].Op == OpAMD64InvertFlags {
35043 v_0 := b.Controls[0]
35044 cmp := v_0.Args[0]
35045 b.resetWithControl(BlockAMD64ULT, cmp)
35046 return true
35047 }
35048
35049
35050 for b.Controls[0].Op == OpAMD64FlagEQ {
35051 b.Reset(BlockFirst)
35052 b.swapSuccessors()
35053 return true
35054 }
35055
35056
35057 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35058 b.Reset(BlockFirst)
35059 b.swapSuccessors()
35060 return true
35061 }
35062
35063
35064 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35065 b.Reset(BlockFirst)
35066 return true
35067 }
35068
35069
35070 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35071 b.Reset(BlockFirst)
35072 b.swapSuccessors()
35073 return true
35074 }
35075
35076
35077 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35078 b.Reset(BlockFirst)
35079 return true
35080 }
35081 case BlockAMD64ULE:
35082
35083
35084 for b.Controls[0].Op == OpAMD64InvertFlags {
35085 v_0 := b.Controls[0]
35086 cmp := v_0.Args[0]
35087 b.resetWithControl(BlockAMD64UGE, cmp)
35088 return true
35089 }
35090
35091
35092 for b.Controls[0].Op == OpAMD64FlagEQ {
35093 b.Reset(BlockFirst)
35094 return true
35095 }
35096
35097
35098 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35099 b.Reset(BlockFirst)
35100 return true
35101 }
35102
35103
35104 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35105 b.Reset(BlockFirst)
35106 b.swapSuccessors()
35107 return true
35108 }
35109
35110
35111 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35112 b.Reset(BlockFirst)
35113 return true
35114 }
35115
35116
35117 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35118 b.Reset(BlockFirst)
35119 b.swapSuccessors()
35120 return true
35121 }
35122 case BlockAMD64ULT:
35123
35124
35125 for b.Controls[0].Op == OpAMD64TESTQ {
35126 v_0 := b.Controls[0]
35127 x := v_0.Args[1]
35128 if x != v_0.Args[0] {
35129 break
35130 }
35131 b.Reset(BlockFirst)
35132 b.swapSuccessors()
35133 return true
35134 }
35135
35136
35137 for b.Controls[0].Op == OpAMD64TESTL {
35138 v_0 := b.Controls[0]
35139 x := v_0.Args[1]
35140 if x != v_0.Args[0] {
35141 break
35142 }
35143 b.Reset(BlockFirst)
35144 b.swapSuccessors()
35145 return true
35146 }
35147
35148
35149 for b.Controls[0].Op == OpAMD64TESTW {
35150 v_0 := b.Controls[0]
35151 x := v_0.Args[1]
35152 if x != v_0.Args[0] {
35153 break
35154 }
35155 b.Reset(BlockFirst)
35156 b.swapSuccessors()
35157 return true
35158 }
35159
35160
35161 for b.Controls[0].Op == OpAMD64TESTB {
35162 v_0 := b.Controls[0]
35163 x := v_0.Args[1]
35164 if x != v_0.Args[0] {
35165 break
35166 }
35167 b.Reset(BlockFirst)
35168 b.swapSuccessors()
35169 return true
35170 }
35171
35172
35173 for b.Controls[0].Op == OpAMD64InvertFlags {
35174 v_0 := b.Controls[0]
35175 cmp := v_0.Args[0]
35176 b.resetWithControl(BlockAMD64UGT, cmp)
35177 return true
35178 }
35179
35180
35181 for b.Controls[0].Op == OpAMD64FlagEQ {
35182 b.Reset(BlockFirst)
35183 b.swapSuccessors()
35184 return true
35185 }
35186
35187
35188 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
35189 b.Reset(BlockFirst)
35190 return true
35191 }
35192
35193
35194 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
35195 b.Reset(BlockFirst)
35196 b.swapSuccessors()
35197 return true
35198 }
35199
35200
35201 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
35202 b.Reset(BlockFirst)
35203 return true
35204 }
35205
35206
35207 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
35208 b.Reset(BlockFirst)
35209 b.swapSuccessors()
35210 return true
35211 }
35212 }
35213 return false
35214 }
35215