// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(64|Ptr) ...) => (ADD ...)
(Add(32|16|8) ...) => (ADDW ...)
(Add32F x y) => (Select0 (FADDS x y))
(Add64F x y) => (Select0 (FADD x y))

(Sub(64|Ptr) ...) => (SUB ...)
(Sub(32|16|8) ...) => (SUBW ...)
(Sub32F x y) => (Select0 (FSUBS x y))
(Sub64F x y) => (Select0 (FSUB x y))

(Mul64 ...) => (MULLD ...)
(Mul(32|16|8) ...) => (MULLW ...)
(Mul32F ...) => (FMULS ...)
(Mul64F ...) => (FMUL ...)
(Mul64uhilo ...) => (MLGR ...)

(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIV ...)

(Div64 x y) => (DIVD x y)
(Div64u ...) => (DIVDU ...)
// DIVW/DIVWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Div32 x y) => (DIVW (MOVWreg x) y)
(Div32u x y) => (DIVWU (MOVWZreg x) y)
(Div16 x y) => (DIVW (MOVHreg x) (MOVHreg y))
(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y))
(Div8 x y) => (DIVW (MOVBreg x) (MOVBreg y))
(Div8u x y) => (DIVWU (MOVBZreg x) (MOVBZreg y))

(Hmul(64|64u) ...) => (MULH(D|DU) ...)
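// 32-bit high multiplies are lowered to a full 64-bit multiply of the
// sign/zero-extended operands: the upper half of the 64-bit product is
// exactly the desired high 32 bits, extracted with SRDconst [32].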
(Hmul32 x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))

(Mod64 x y) => (MODD x y)
(Mod64u ...) => (MODDU ...)
// MODW/MODWU have a 64-bit dividend and a 32-bit divisor,
// so a sign/zero extension of the dividend is required.
(Mod32 x y) => (MODW (MOVWreg x) y)
(Mod32u x y) => (MODWU (MOVWZreg x) y)
(Mod16 x y) => (MODW (MOVHreg x) (MOVHreg y))
(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y))
(Mod8 x y) => (MODW (MOVBreg x) (MOVBreg y))
(Mod8u x y) => (MODWU (MOVBZreg x) (MOVBZreg y))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
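// For example, with x=6 and y=4 (x>=y, so x-y cannot wrap around):
//   (6-4)/2 + 4 = 1 + 4 = 5 = (6+4)/2
// Rewriting this way avoids overflow in the intermediate sum x+y.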
(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)

(And64 ...) => (AND ...)
(And(32|16|8) ...) => (ANDW ...)

(Or64 ...) => (OR ...)
(Or(32|16|8) ...) => (ORW ...)

(Xor64 ...) => (XOR ...)
(Xor(32|16|8) ...) => (XORW ...)

(Neg64 ...) => (NEG ...)
(Neg(32|16|8) ...) => (NEGW ...)
(Neg32F ...) => (FNEGS ...)
(Neg64F ...) => (FNEG ...)

(Com64 ...) => (NOT ...)
(Com(32|16|8) ...) => (NOTW ...)
(NOT x) => (XOR (MOVDconst [-1]) x)
(NOTW x) => (XORWconst [-1] x)

// Lowering boolean ops
(AndB ...) => (ANDW ...)
(OrB ...) => (ORW ...)
(Not x) => (XORWconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

// TODO: optimize these cases?
(Ctz64NonZero ...) => (Ctz64 ...)
(Ctz32NonZero ...) => (Ctz32 ...)

// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
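// (x-1)&^x sets exactly the bits below the lowest set bit of x. For
// example, x = 0b1000: (x-1)&^x = 0b0111, whose leftmost one is found
// by FLOGR at bit 61 (numbered from the MSB), giving 64 - 61 = 3.
// For x = 0, (x-1)&^x is all ones, FLOGR returns 0 and the result is 64.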
(Ctz64 <t> x) => (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))

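// FLOGR (find leftmost one) returns the bit index of the most significant
// one bit, numbered from the MSB (i.e. the leading zero count), or 64 if
// the input is zero, so BitLen64(0) = 64 - 64 = 0 as required.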
(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))

// POPCNT treats the input register as a vector of 8 bytes, producing
// a population count for each individual byte. For inputs larger than
// a single byte we therefore need to sum the individual bytes produced
// by the POPCNT instruction. For example, the following instruction
// sequence could be used to calculate the population count of a 4-byte
// value:
//
//     MOVD   $0x12345678, R1 // R1=0x12345678 <-- input
//     POPCNT R1, R2          // R2=0x02030404
//     SRW    $16, R2, R3     // R3=0x00000203
//     ADDW   R2, R3, R4      // R4=0x02030607
//     SRW    $8, R4, R5      // R5=0x00020306
//     ADDW   R4, R5, R6      // R6=0x0205090d
//     MOVBZ  R6, R7          // R7=0x0000000d <-- result is 13
//
(PopCount8 x) => (POPCNT (MOVBZreg x))
(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))

// SumBytes{2,4,8} pseudo operations sum the values of the rightmost
// 2, 4 or 8 bytes respectively. The result is a single byte; however,
// the other bytes might contain junk, so a zero extension is required
// if the desired output type is larger than 1 byte.
(SumBytes2 x) => (ADDW (SRWconst <typ.UInt8> x [8]) x)
(SumBytes4 x) => (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
(SumBytes8 x) => (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))

(Bswap64 ...) => (MOVDBR ...)
(Bswap32 ...) => (MOVWBR ...)

// add with carry
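// The (ADDCconst c [-1]) computes c + 0xffff...ffff, which produces a
// carry out exactly when c != 0. This converts the 0/1 carry input c
// into a carry flag that ADDE can consume.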
(Select0 (Add64carry x y c))
    => (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
(Select1 (Add64carry x y c))
    => (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))

// subtract with borrow
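// Similarly, (SUBC (MOVDconst [0]) c) computes 0 - c, which borrows
// exactly when c != 0, converting the 0/1 borrow input c into a flags
// value for SUBE. The final NEG turns the 0/-1 borrow result into 0/1.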
(Select0 (Sub64borrow x y c))
    => (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
(Select1 (Sub64borrow x y c))
    => (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))

// math package intrinsics
(Sqrt ...) => (FSQRT ...)
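// FIDBR rounds a float64 to an integral value; its immediate selects the
// rounding mode (per the z/Architecture definition of LOAD FP INTEGER):
// 1 = to nearest, ties away from zero; 4 = to nearest, ties to even;
// 5 = toward zero; 6 = toward +Inf; 7 = toward -Inf.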
(Floor x) => (FIDBR [7] x)
(Ceil x) => (FIDBR [6] x)
(Trunc x) => (FIDBR [5] x)
(RoundToEven x) => (FIDBR [4] x)
(Round x) => (FIDBR [1] x)
(FMA x y z) => (FMADD z x y)

(Sqrt32 ...) => (FSQRTS ...)

// Atomic loads and stores.
// The SYNC instruction (fast-BCR-serialization) prevents store-load
// reordering. Other sequences of memory operations (load-load,
// store-store and load-store) are already guaranteed not to be reordered.
(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem)
(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem))

// Store-release doesn't require store-load ordering.
(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem)

// Atomic adds.
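// LAA/LAAG return the value the memory location held before the add,
// but AtomicAdd must return the new value. The AddTupleFirst{32,64}
// pseudo ops therefore re-add val to the first element of the tuple
// whenever it is selected.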
(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem))
(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem))
(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDW val (Select0 <t> tuple))
(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
(Select0 <t> (AddTupleFirst64 val tuple)) => (ADD val (Select0 <t> tuple))
(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)

// Atomic exchanges.
(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem)
(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem)

// Atomic and: *(*uint8)(ptr) &= val
//
// Round pointer down to nearest word boundary and pad value with ones before
// applying atomic AND operation to target word.
//
// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3)))
//
(AtomicAnd8 ptr val mem)
    => (LANfloor
        ptr
        (RLL <typ.UInt32>
            (ORWconst <typ.UInt32> val [-1<<8])
            (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
        mem)

// Atomic or: *(*uint8)(ptr) |= val
//
// Round pointer down to nearest word boundary and pad value with zeros before
// applying atomic OR operation to target word.
//
// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3))
//
(AtomicOr8 ptr val mem)
    => (LAOfloor
        ptr
        (SLW <typ.UInt32>
            (MOVBZreg <typ.UInt32> val)
            (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
        mem)
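// In both rules above the RXSBG computes the bit offset of the target
// byte within its word: (3<<3) ^ ((ptr&3)<<3). s390x is big-endian, so
// the byte at ptr&3 == 0 occupies the most significant byte of the word
// and needs a shift of 24, while the byte at ptr&3 == 3 needs a shift
// of 0.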

(AtomicAnd32 ...) => (LAN ...)
(AtomicOr32 ...) => (LAO ...)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
(SignExt16to(32|64) ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
(ZeroExt32to64 ...) => (MOVWZreg ...)

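// Slicemask(x) is -1 if x > 0 and 0 if x == 0. Negating a positive
// length sets the sign bit, which the arithmetic shift by 63 then
// smears across all 64 bits.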
(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc(16|32|64)to8 ...) => (Copy ...)
(Trunc(32|64)to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Lowering float <-> int
(Cvt32to32F ...) => (CEFBRA ...)
(Cvt32to64F ...) => (CDFBRA ...)
(Cvt64to32F ...) => (CEGBRA ...)
(Cvt64to64F ...) => (CDGBRA ...)

(Cvt32Fto32 ...) => (CFEBRA ...)
(Cvt32Fto64 ...) => (CGEBRA ...)
(Cvt64Fto32 ...) => (CFDBRA ...)
(Cvt64Fto64 ...) => (CGDBRA ...)

// Lowering float <-> uint
(Cvt32Uto32F ...) => (CELFBR ...)
(Cvt32Uto64F ...) => (CDLFBR ...)
(Cvt64Uto32F ...) => (CELGBR ...)
(Cvt64Uto64F ...) => (CDLGBR ...)

(Cvt32Fto32U ...) => (CLFEBR ...)
(Cvt32Fto64U ...) => (CLGEBR ...)
(Cvt64Fto32U ...) => (CLFDBR ...)
(Cvt64Fto64U ...) => (CLGDBR ...)

// Lowering float32 <-> float64
(Cvt32Fto64F ...) => (LDEBR ...)
(Cvt64Fto32F ...) => (LEDBR ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// Lowering shifts

// Lower bounded shifts first. No need to check shift value.
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)

// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = shift >= 64 ? 0 : arg << shift
(Lsh(64|32|16|8)x64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
(Lsh(64|32|16|8)x32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
(Lsh(64|32|16|8)x16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
(Lsh(64|32|16|8)x8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))

(Rsh(64|32)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
(Rsh(64|32)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
(Rsh(64|32)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
(Rsh(64|32)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))

(Rsh(16|8)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
(Rsh(16|8)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
(Rsh(16|8)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
(Rsh(16|8)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to 63 (all ones) if the shift value is more than 63.
// result = arg >> (shift >= 64 ? 63 : shift)
(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
(Rsh(64|32)x8 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))

(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
(Rsh(16|8)x8 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))

// Lowering rotates
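// The rotation amount is reduced modulo the width: a rotate left by c is
// a left shift by c%w OR'd with a right shift by (-c)%w. For example,
// with w=8 and c=3 this is (x<<3) | (x>>5).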
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 ...) => (RLL ...)
(RotateLeft64 ...) => (RLLG ...)

// Lowering comparisons
(Less64 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Less32 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Less(16|8) x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
(Less64U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Less32U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
(Less64F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Less32F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Leq64 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Leq32 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Leq(16|8) x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
(Leq64U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
(Leq32U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
(Leq64F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Leq32F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Eq32 x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
(Eq64F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Eq32F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

(Neq(64|Ptr) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
(Neq32 x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
(Neq64F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
(Neq32F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) => (MOVBZload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// Lowering stores
// These more-specific FP versions of the Store pattern must come first.
(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)

(Store {t} ptr val mem) && t.Size() == 8 => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)

// Lowering moves

// Load and store for small copies.
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem)
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
(Move [16] dst src mem) =>
    (MOVDstore [8] dst (MOVDload [8] src mem)
        (MOVDstore dst (MOVDload src mem) mem))
(Move [24] dst src mem) =>
    (MOVDstore [16] dst (MOVDload [16] src mem)
        (MOVDstore [8] dst (MOVDload [8] src mem)
            (MOVDstore dst (MOVDload src mem) mem)))
(Move [3] dst src mem) =>
    (MOVBstore [2] dst (MOVBZload [2] src mem)
        (MOVHstore dst (MOVHZload src mem) mem))
(Move [5] dst src mem) =>
    (MOVBstore [4] dst (MOVBZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) =>
    (MOVHstore [4] dst (MOVHZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) =>
    (MOVBstore [6] dst (MOVBZload [6] src mem)
        (MOVHstore [4] dst (MOVHZload [4] src mem)
            (MOVWstore dst (MOVWZload src mem) mem)))

// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s), 0)] dst src mem)
(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
    (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))

// Move more than 1024 bytes using a loop.
(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
    (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)

// Lowering Zero instructions
(Zero [0] _ mem) => mem
(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem)
(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
(Zero [3] destptr mem) =>
    (MOVBstoreconst [makeValAndOff(0,2)] destptr
        (MOVHstoreconst [0] destptr mem))
(Zero [5] destptr mem) =>
    (MOVBstoreconst [makeValAndOff(0,4)] destptr
        (MOVWstoreconst [0] destptr mem))
(Zero [6] destptr mem) =>
    (MOVHstoreconst [makeValAndOff(0,4)] destptr
        (MOVWstoreconst [0] destptr mem))
(Zero [7] destptr mem) =>
    (MOVWstoreconst [makeValAndOff(0,3)] destptr
        (MOVWstoreconst [0] destptr mem))

(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
    (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)

// Zero more than 1024 bytes using a loop.
(Zero [s] destptr mem) && s > 1024 =>
    (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)

// Lowering constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F ...) => (FMOV(S|D)const ...)
(ConstNil) => (MOVDconst [0])
(ConstBool [t]) => (MOVDconst [b2i(t)])

// Lowering calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Miscellaneous
(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
(NilCheck ...) => (LoweredNilCheck ...)
(GetG ...) => (LoweredGetG ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(Addr {sym} base) => (MOVDaddr {sym} base)
(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
(ITab (Load ptr mem)) => (MOVDload ptr mem)

// block rewrites
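// The boolean condition is zero-extended and compared against zero;
// the LessOrGreater mask acts as 'not equal' here, so we branch to yes
// exactly when cond != 0.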
(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Note on removing unnecessary sign/zero extensions.
//
// After a value is spilled it is restored using a sign- or zero-extension
// to register-width as appropriate for its type. For example, a uint8 will
// be restored using a MOVBZ (llgc) instruction which will zero extend the
// 8-bit value to 64-bits.
//
// This is a hazard when folding sign- and zero-extensions since we need to
// ensure not only that the value in the argument register is correctly
// extended but also that it will still be correctly extended if it is
// spilled and restored.
//
// In general this means we need type checks when the RHS of a rule is an
// OpCopy (i.e. "(... x:(...) ...) -> x").

// Merge double extensions.
(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)

// Bypass redundant sign extensions.
(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)

// Bypass redundant zero extensions.
(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)

// Remove zero extensions after zero extending load.
// Note: take care that if x is spilled it is restored correctly.
(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
(MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
(MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x

// Remove sign extensions after sign extending load.
// Note: take care that if x is spilled it is restored correctly.
(MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
(MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
(MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x

// Remove sign extensions after zero extending load.
// These type checks are probably unnecessary but do them anyway just in case.
(MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
(MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x

// Fold sign and zero extensions into loads.
//
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
//
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
(MOV(B|H|W)Zreg <t> x:(MOV(B|H|W)load [o] {s} p mem))
    && x.Uses == 1
    && clobber(x)
    => @x.Block (MOV(B|H|W)Zload <t> [o] {s} p mem)
(MOV(B|H|W)reg <t> x:(MOV(B|H|W)Zload [o] {s} p mem))
    && x.Uses == 1
    && clobber(x)
    => @x.Block (MOV(B|H|W)load <t> [o] {s} p mem)

// Remove zero extensions after argument load.
(MOVBZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() == 1 => x
(MOVHZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 2 => x
(MOVWZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 4 => x

// Remove sign extensions after argument load.
(MOVBreg x:(Arg <t>)) && t.IsSigned() && t.Size() == 1 => x
(MOVHreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 2 => x
(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x

// Fold zero extensions into constants.
(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))])
(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Fold sign extensions into constants.
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])

// Remove zero extension of conditional move.
// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering.
(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
    && int64(uint8(c)) == c
    && int64(uint8(d)) == d
    && (!x.Type.IsSigned() || x.Type.Size() > 1)
    => x

// Fold boolean tests into blocks.
// Note: this must match If statement lowering.
(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
    && int32(x) != 0
    => (BRC {d} cmp yes no)

// Canonicalize BRC condition code mask by removing impossible conditions.
// Integer comparisons cannot generate the unordered condition.
(BRC {c} x:((CMP|CMPW|CMPU|CMPWU) _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)

// Compare-and-branch.
// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
(BRC {c} (CMP x y) yes no) => (CGRJ {c&^s390x.Unordered} x y yes no)
(BRC {c} (CMPW x y) yes no) => (CRJ {c&^s390x.Unordered} x y yes no)
(BRC {c} (CMPU x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no)
(BRC {c} (CMPWU x y) yes no) => (CLRJ {c&^s390x.Unordered} x y yes no)

// Compare-and-branch (immediate).
// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
(BRC {c} (CMPconst x [y]) yes no) && y == int32( int8(y)) => (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
(BRC {c} (CMPWconst x [y]) yes no) && y == int32( int8(y)) => (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
(BRC {c} (CMPUconst x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)

// Absorb immediate into compare-and-branch.
(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) => (C(I|GI)J {c} x [ int8(y)] yes no)
(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no)
(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) => (C(I|GI)J {c.ReverseComparison()} y [ int8(x)] yes no)
(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no)

// Prefer comparison with immediate to compare-and-branch.
(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPconst x [int32(y)]) yes no)
(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPWconst x [int32(y)]) yes no)
(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst x [int32(y)]) yes no)
(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no)
(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)

// Absorb sign/zero extensions into 32-bit compare-and-branch.
(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ {c} x [y] yes no)
(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no)

// Bring out-of-range signed immediates into range by varying branch condition.
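// For example, signed x < 128 is equivalent to x <= 127, which brings
// the immediate into the signed 8-bit range of CGIJ/CIJ.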
(BRC {s390x.Less} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.LessOrEqual} x [ 127] yes no)
(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) => (CGIJ {s390x.Less} x [-128] yes no)
(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) => (CIJ {s390x.Less} x [-128] yes no)
(BRC {s390x.Greater} (CMPconst x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) => (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.Greater} x [ 127] yes no)
(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.Greater} x [ 127] yes no)

// Bring out-of-range unsigned immediates into range by varying branch condition.
(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no)
(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.Greater} x [255] yes no)

// Bring out-of-range immediates into range by switching signedness (only == and !=).
(BRC {c} (CMPconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no)
(BRC {c} (CMPWconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ {c} x [uint8(y)] yes no)
(BRC {c} (CMPUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ {c} x [ int8(y)] yes no)
(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)

// Fold constants into instructions.
(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [int32(c)] x)
(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)

(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst <v.Type> x [int32(c)]))
(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)])
(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst <v.Type> x [int32(c)]))

(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x)
(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x)

// NILF instructions leave the high 32 bits of the target unchanged,
// which is equivalent to ANDing with a mask whose leftmost 32 bits
// are all set.
// TODO(mundaym): modify the assembler to accept 64-bit values
// and use isU32Bit(^c).
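// For example, ANDing with the contiguous mask 0x00ffff00 can instead be
// encoded as a RISBGZ selecting bits 40-55 (numbered from the MSB) with
// no rotation.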
(AND x (MOVDconst [c]))
    && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
    => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
(AND x (MOVDconst [c]))
    && is32Bit(c)
    && c < 0
    => (ANDconst [c] x)
(AND x (MOVDconst [c]))
    && is32Bit(c)
    && c >= 0
    => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))

(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)

((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)

((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x)
((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)

// Constant shifts.
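// The shift instructions consume 6 bits of the shift amount, so a 32-bit
// shift by an amount with bit 5 set (c&32 != 0) shifts out every bit:
// logical shifts yield 0 and arithmetic right shifts yield the sign,
// i.e. x >> 31.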
(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])

// Shifts only use the rightmost 6 bits of the shift value.
(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
    && r.Amount == 0
    && r.OutMask()&63 == 63
    => (S(LD|RD|RAD|LW|RW|RAW) x y)
(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
    => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
    => (S(LD|RD|RAD|LW|RW|RAW) x y)
(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y)
(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y)
(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
(SLW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW x y)
(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y)
(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)

// Match rotate by constant.
(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])

// Match rotate by constant pattern.
((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
((ADD|OR|XOR)W (SLWconst x [c]) (SRWconst x [32-c])) => (RLLconst x [c])

// Signed 64-bit comparison with immediate.
(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))

// Unsigned 64-bit comparison with immediate.
(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)])
(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)]))

// Signed and unsigned 32-bit comparison with immediate.
(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))

// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
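// For example, (x>>8)<<4 becomes a rotate right by 4 (amount 60) keeping
// bits 4-59 (numbered from the MSB); the wrapped-around low bits fall
// outside the selection and are cleared.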
(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})

// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})

// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})

// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})

// Absorb shift into 'rotate then insert selected bits [into zero]'.
//
// Any unsigned shift can be represented as a rotate and mask operation:
//
//   x << c => RotateLeft64(x, c) & (^uint64(0) << c)
//   x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
//
// Therefore when a shift is used as the input to a rotate then insert
// selected bits instruction we can merge the two together. We just have
// to be careful that the resultant mask is representable (non-zero and
// contiguous). For example, assuming that x is variable and c, y and m
// are constants, a shift followed by a rotate then insert selected bits
// could be represented as:
//
//   RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
//
// We can split the rotation by y into two, one rotate for x and one for
// the mask:
//
//   RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
//
// The rotations of x by c followed by y can then be combined:
//
//   RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
//   ^^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
//          rotate                          mask
//
// To perform this optimization we therefore just need to check that it
// is valid to merge the shift mask (^uint64(0)<<c or ^uint64(0)>>c) into
// the selected bits mask (i.e. that the resultant mask is non-zero and
// contiguous).
//
(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})

// Absorb 'rotate then insert selected bits [into zero]' into left shift.
(SLDconst (RISBGZ x {r}) [c])
    && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
    => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})

// Absorb 'rotate then insert selected bits [into zero]' into right shift.
(SRDconst (RISBGZ x {r}) [c])
    && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
    => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})

// Merge 'rotate then insert selected bits [into zero]' instructions together.
(RISBGZ (RISBGZ x {y}) {z})
    && z.InMerge(y.OutMask()) != nil
    => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})

// Convert RISBGZ into 64-bit shift (helps CSE).
(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])

// Optimize single bit isolation when it is known to be equivalent to
// the most significant bit due to the mask produced by an arithmetic
// shift. Simply isolate the most significant bit itself and place it
// in the correct position.
//
// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
(RISBGZ (SRADconst x [c]) {r})
    && r.Start == r.End // single bit selected
    && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
    => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})

// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))

// Use zero extension instead of RISBGZ.
(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)

// Use zero extension instead of ANDW.
(ANDWconst [0x00ff] x) => (MOVBZreg x)
(ANDWconst [0xffff] x) => (MOVHZreg x)

// Strength reduce multiplication to the sum (or difference) of two powers of two.
//
// Examples:
//     5x -> 4x + 1x
//    10x -> 8x + 2x
//   120x -> 128x - 8x
//  -120x -> 8x - 128x
//
// We know that the rightmost bit of any positive value, once isolated, must either
// be a power of 2 (because it is a single bit) or 0 (if the original value is 0).
// In all of these rules we use a rightmost bit calculation to determine one operand
// for the addition or subtraction. We then just need to calculate if the other
// operand is a valid power of 2 before we can match the rule.
//
// Notes:
//   - the generic rules have already matched single powers of two so we ignore them here
//   - isPowerOfTwo32 asserts that its argument is greater than 0
//   - c&(c-1) = clear rightmost bit
//   - c&^(c-1) = isolate rightmost bit
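//
// For example, c = 10 (0b1010): c&(c-1) = 8 and c&^(c-1) = 2, so the
// first rule below rewrites 10x as (x<<3) + (x<<1).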

// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c&(c-1))
    => ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
                   (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))

// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c+(c&^(c-1)))
    => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
                   (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))

// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1)))
    => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
                   (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))

// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx)

// fold ADDconst into MOVDaddrx
(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)

// reverse ordering of compare instruction
(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp)

// replace load from same location as preceding store with copy
(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x)
(MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x)
(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x)
(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x)
(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x)
(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x)
(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x)
(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x)
(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x

// prefer FPR <-> GPR moves over combined load ops
(MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR <t> y))
(ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD x (LGDR <t> y))
(SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB x (LGDR <t> y))
(ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR x (LGDR <t> y))
(ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND x (LGDR <t> y))
(XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR x (LGDR <t> y))

// detect attempts to set/clear the sign bit
// may need to be reworked when NIHH/OIHH are added
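// For reference: LGDR/LDGR move bits between FPRs and GPRs unchanged,
// LPDFR clears the sign bit (load positive), LNDFR sets it (load
// negative) and CPSDR copies the sign of one FPR onto another.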
873 (RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
874 (LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
875 (OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
876 (LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
877
878 // detect attempts to set the sign bit with load
879 (LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
880
881 // detect copysign
882 (OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
883 && r == s390x.NewRotateParams(0, 0, 0)
884 => (LGDR (CPSDR <t> y x))
885 (OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
886 && c >= 0
887 && r == s390x.NewRotateParams(0, 0, 0)
888 => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
889 (CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
890 (CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y)
891
892 // absorb negations into set/clear sign bit
893 (FNEG (LPDFR x)) => (LNDFR x)
894 (FNEG (LNDFR x)) => (LPDFR x)
895 (FNEGS (LPDFR x)) => (LNDFR x)
896 (FNEGS (LNDFR x)) => (LPDFR x)
897
898 // no need to convert float32 to float64 to set/clear sign bit
899 (LEDBR (LPDFR (LDEBR x))) => (LPDFR x)
900 (LEDBR (LNDFR (LDEBR x))) => (LNDFR x)
901
902 // remove unnecessary FPR <-> GPR moves
903 (LDGR (LGDR x)) => x
904 (LGDR (LDGR x)) => x
905
906 // Don't extend before storing
907 (MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
908 (MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
909 (MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
910 (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
911 (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
912 (MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
913
914 // Fold constants into memory operations.
915 // Note that this is not always a good idea because if not all the uses of
916 // the ADDconst get eliminated, we still have to compute the ADDconst and we now
917 // have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
918 // Nevertheless, let's do it!
919 (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem)
920 (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
921 (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem)
922 (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
923 (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem)
924 (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem)
925 (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem)
926 (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem)
927 (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem)
928
929 (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem)
930 (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
931 (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem)
932 (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
933 (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem)
934 (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem)
935
936 (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem)
937 (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem)
938 (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem)
939 (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem)
940 (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem)
941 (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem)
942
943 (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem)
944 (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem)
945 (ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem)
946 (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem)
947 (XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem)
948 (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem)
949
950 // Fold constants into stores.
951 (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
952 (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
953 (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
954 (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
955 (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
956 (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
957 (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
958 (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
959
960 // Fold address offsets into constant stores.
961 (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
962 (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
963 (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
964 (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
965 (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
966 (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
967 (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) =>
968 (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)

// Merge address calculations into loads and stores.
// Offsets from SB must not be merged into unaligned memory accesses because
// loads/stores using PC-relative addressing directly must be aligned to the
// size of the target.
(MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
  (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
  (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
  (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
  (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
  (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
  (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
  (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
  (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
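
// For example (illustrative; canMergeSym requires that at most one of the
// two symbols is non-nil, and base must either not be SB or be suitably
// aligned):
//   (MOVWload [8] {sym} (MOVDaddr [16] base) mem)
//   => (MOVWload [24] {sym} base mem)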

(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)

(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
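
// For example, an add whose memory operand comes from a computed address
// (illustrative; assumes ptr is not SB and at most one symbol is non-nil):
//   (ADDload [4] {s1} x (MOVDaddr [12] {s2} ptr) mem)
//   => (ADDload [16] {mergeSym(s1, s2)} x ptr mem)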

// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)

// MOVDaddr into MOVDaddridx
(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
  (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)

// Absorb InvertFlags into branches.
(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)
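
// InvertFlags marks a comparison whose operands were swapped, so the
// condition must be reversed: less and greater are exchanged while equal
// is preserved. For example (illustrative):
//   (BRC {s390x.Less} (InvertFlags (CMP x y)) yes no)
//   => (BRC {s390x.Greater} (CMP x y) yes no)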

// Constant comparisons.
(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && x<int64(y) => (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)

(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
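
// For example, 1 < 2, so (illustrative):
//   (CMPWconst (MOVDconst [1]) [2]) => (FlagLT)
// The resulting flag constant can then be absorbed by the branch and
// conditional-move rules further down.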

(CMP(W|WU)const (MOVBZreg _) [c]) && 0xff < c => (FlagLT)
(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT)

(CMPconst (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)

(CMPUconst (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n) => (FlagLT)
(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n) => (FlagLT)

(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT)
(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)

(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)

// Constant compare-and-branch with immediate.
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int64(x) > int64(y) => (First yes no)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int32(x) == int32(y) => (First yes no)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int32(x) < int32(y) => (First yes no)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int32(x) > int32(y) => (First yes no)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint64(x) == uint64(y) => (First yes no)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint64(x) < uint64(y) => (First yes no)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) > uint64(y) => (First yes no)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint32(x) == uint32(y) => (First yes no)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint32(x) < uint32(y) => (First yes no)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) > uint32(y) => (First yes no)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int64(x) == int64(y) => (First no yes)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int64(x) < int64(y) => (First no yes)
(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int64(x) > int64(y) => (First no yes)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int32(x) == int32(y) => (First no yes)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int32(x) < int32(y) => (First no yes)
(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int32(x) > int32(y) => (First no yes)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint64(x) == uint64(y) => (First no yes)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint64(x) < uint64(y) => (First no yes)
(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) > uint64(y) => (First no yes)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint32(x) == uint32(y) => (First no yes)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint32(x) < uint32(y) => (First no yes)
(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) > uint32(y) => (First no yes)
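
// For example, 3 < 5, so a compare-and-branch that branches on 'less'
// always takes the branch, while one that does not include 'less' never
// does (illustrative):
//   (CGIJ {s390x.Less}  (MOVDconst [3]) [5] yes no) => (First yes no)
//   (CGIJ {s390x.Equal} (MOVDconst [3]) [5] yes no) => (First no yes)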

// Constant compare-and-branch with immediate for unsigned comparisons
// with zero: an unsigned value is always greater than or equal to zero.
(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no)
(C(L|LG)IJ {s390x.Less} _ [0] yes no) => (First no yes)

// Constant compare-and-branch when operands match.
(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no)
(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes)
// Convert 64-bit comparisons to 32-bit comparisons, and signed comparisons
// to unsigned comparisons.
// This helps simplify the detection of constant comparisons.
(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c])
(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
(CMPconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c])
(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 => (CMPWUconst x [c])
(CMPconst x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst x [n])
(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n])
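
// For example, a value masked to 8 bits is known to be non-negative, so
// a signed comparison against a non-negative constant can be rewritten
// as an unsigned 32-bit comparison (illustrative):
//   (CMPconst (MOVWZreg x:(ANDWconst [0xff] _)) [10]) => (CMPWUconst x [10])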

// Absorb sign and zero extensions into 32-bit comparisons.
(CMP(W|W|WU|WU) x (MOV(W|WZ|W|WZ)reg y)) => (CMP(W|W|WU|WU) x y)
(CMP(W|W|WU|WU) (MOV(W|WZ|W|WZ)reg x) y) => (CMP(W|W|WU|WU) x y)
(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c])

// Absorb flag constants into branches.
(BRC {c} (FlagEQ) yes no) && c&s390x.Equal != 0 => (First yes no)
(BRC {c} (FlagLT) yes no) && c&s390x.Less != 0 => (First yes no)
(BRC {c} (FlagGT) yes no) && c&s390x.Greater != 0 => (First yes no)
(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no)

(BRC {c} (FlagEQ) yes no) && c&s390x.Equal == 0 => (First no yes)
(BRC {c} (FlagLT) yes no) && c&s390x.Less == 0 => (First no yes)
(BRC {c} (FlagGT) yes no) && c&s390x.Greater == 0 => (First no yes)
(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes)

// Absorb flag constants into SETxx ops.
(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal != 0 => x
(LOCGR {c} _ x (FlagLT)) && c&s390x.Less != 0 => x
(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater != 0 => x
(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x

(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal == 0 => x
(LOCGR {c} x _ (FlagLT)) && c&s390x.Less == 0 => x
(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater == 0 => x
(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x

// Remove redundant *const ops
(ADDconst [0] x) => x
(ADDWconst [c] x) && int32(c)==0 => x
(SUBconst [0] x) => x
(SUBWconst [c] x) && int32(c) == 0 => x
(ANDconst [0] _) => (MOVDconst [0])
(ANDWconst [c] _) && int32(c)==0 => (MOVDconst [0])
(ANDconst [-1] x) => x
(ANDWconst [c] x) && int32(c)==-1 => x
(ORconst [0] x) => x
(ORWconst [c] x) && int32(c)==0 => x
(ORconst [-1] _) => (MOVDconst [-1])
(ORWconst [c] _) && int32(c)==-1 => (MOVDconst [-1])
(XORconst [0] x) => x
(XORWconst [c] x) && int32(c)==0 => x

// Shifts by zero (may be inserted during multiplication strength reduction).
((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x

// Convert constant subtracts to constant adds.
(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x)
(SUBWconst [c] x) => (ADDWconst [-int32(c)] x)
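
// The guard on SUBconst excludes c == -(1<<31), whose negation does not
// fit back into the 32-bit immediate. For example (illustrative):
//   (SUBconst [7] x) => (ADDconst [-7] x)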

// generic constant folding
// TODO: more of this
(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x)
(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x)
(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)])
(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x)
(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)])
(NEG (MOVDconst [c])) => (MOVDconst [-c])
(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))])
(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d])
(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))])
(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d])
(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d])
(LoweredRound32F x:(FMOVSconst)) => x
(LoweredRound64F x:(FMOVDconst)) => x

// generic simplifications
// TODO: more of this
(ADD x (NEG y)) => (SUB x y)
(ADDW x (NEGW y)) => (SUBW x y)
(SUB x x) => (MOVDconst [0])
(SUBW x x) => (MOVDconst [0])
(AND x x) => x
(ANDW x x) => x
(OR x x) => x
(ORW x x) => x
(XOR x x) => (MOVDconst [0])
(XORW x x) => (MOVDconst [0])
(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x)
(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
// carry flag generation
// (only the case where the carry out is zero is constant folded)
(Select1 (ADDCconst (MOVDconst [c]) [d]))
  && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
  => (FlagEQ)
(Select1 (ADDCconst (MOVDconst [c]) [d]))
  && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
  => (FlagLT)

// borrow flag generation
// (only the case where no borrow occurs is constant folded)
(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
  && uint64(d) <= uint64(c) && c-d == 0
  => (FlagGT)
(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
  && uint64(d) <= uint64(c) && c-d != 0
  => (FlagOV)

// add with carry
(ADDE x y (FlagEQ)) => (ADDC x y)
(ADDE x y (FlagLT)) => (ADDC x y)
(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)])
(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)])

// subtract with borrow
(SUBE x y (FlagGT)) => (SUBC x y)
(SUBE x y (FlagOV)) => (SUBC x y)
(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d])
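
// For example, 10-10 and 10-3 both complete without a borrow, producing
// the two no-borrow flag states that SUBE absorbs above (illustrative):
//   (Select1 (SUBC (MOVDconst [10]) (MOVDconst [10]))) => (FlagGT)
//   (Select1 (SUBC (MOVDconst [10]) (MOVDconst [3]))) => (FlagOV)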

// collapse carry chain
(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
  => (ADDE x y c)

// collapse borrow chain
(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
  => (SUBE x y c)

// branch on carry
(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry)
(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry} carry)
(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.NoCarry} carry)
(C(G|LG)IJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
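
// (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) computes
// 0 + 0 + carry, i.e. it materializes the carry bit as the value 0 or 1.
// Comparing that value against 0 or 1 is therefore just a branch on the
// carry flag, so the materialization can be dropped. Since the value is
// never greater than 1, 'greater than 0' likewise means the carry is set.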

// branch on borrow
(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow)
(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow} borrow)
(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow)
(C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)

// fused multiply-add
(Select0 (F(ADD|SUB) (FMUL y z) x)) => (FM(ADD|SUB) x y z)
(Select0 (F(ADDS|SUBS) (FMULS y z) x)) => (FM(ADDS|SUBS) x y z)

// Convert floating point comparisons against zero into 'load and test' instructions.
(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x)
(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR <v.Type> x))

// FSUB, FSUBS, FADD and FADDS now produce a condition code representing
// the comparison of the result with 0.0. If a compare-with-zero
// instruction (e.g. LTDBR) follows one of those instructions, we can use
// the generated flag and remove the comparison instruction.
// Note: when inserting Select1 ops we need to ensure they are in the
// same block as their argument. We could also use @x.Block for this
// but moving the flag-generating value to a different block seems to
// increase the likelihood that the flags value will have to be
// regenerated by flagalloc, which is not what we want.
(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x)
(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x)

// Fold loads into arithmetic and logical operations.
// Exclude global data (SB) because these instructions cannot handle relative addresses.
// TODO(mundaym): indexed versions of these?
((ADD|SUB|MULLD|AND|OR|XOR) <t> x g:(MOVDload [off] {sym} ptr mem))
  && ptr.Op != OpSB
  && is20Bit(int64(off))
  && canMergeLoadClobber(v, g, x)
  && clobber(g)
  => ((ADD|SUB|MULLD|AND|OR|XOR)load <t> [off] {sym} x ptr mem)
((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWload [off] {sym} ptr mem))
  && ptr.Op != OpSB
  && is20Bit(int64(off))
  && canMergeLoadClobber(v, g, x)
  && clobber(g)
  => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWZload [off] {sym} ptr mem))
  && ptr.Op != OpSB
  && is20Bit(int64(off))
  && canMergeLoadClobber(v, g, x)
  && clobber(g)
  => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)

// Combine constant stores into larger (unaligned) stores.
// Avoid SB because constant stores to relative offsets are
// emulated by the assembler, and the emulation cannot handle
// unaligned offsets.
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  && p.Op != OpSB
  && x.Uses == 1
  && a.Off() + 1 == c.Off()
  && clobber(x)
  => (MOVHstoreconst [makeValAndOff(c.Val()&0xff | a.Val()<<8, a.Off())] {s} p mem)
(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
  && p.Op != OpSB
  && x.Uses == 1
  && a.Off() + 2 == c.Off()
  && clobber(x)
  => (MOVWstore [a.Off()] {s} p (MOVDconst [int64(c.Val()&0xffff | a.Val()<<16)]) mem)
(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  && p.Op != OpSB
  && x.Uses == 1
  && a.Off() + 4 == c.Off()
  && clobber(x)
  => (MOVDstore [a.Off()] {s} p (MOVDconst [c.Val64()&0xffffffff | a.Val64()<<32]) mem)
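
// For example, on this big-endian target (illustrative):
//   (MOVBstoreconst [makeValAndOff(0x34,1)] {s} p
//     x:(MOVBstoreconst [makeValAndOff(0x12,0)] {s} p mem))
//   => (MOVHstoreconst [makeValAndOff(0x1234,0)] {s} p mem)
// since 0x34&0xff | 0x12<<8 == 0x1234 still stores 0x12 at offset 0 and
// 0x34 at offset 1.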

// Combine stores into larger (unaligned) stores.
// It doesn't work on global data (based on SB) because stores with relative addressing
// require that the memory operand be aligned.
(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHstore [i-1] {s} p w0 mem)
(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHstore [i-1] {s} p w0 mem)
(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVWstore [i-2] {s} p w mem)
(MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVWstore [i-2] {s} p w0 mem)
(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVWstore [i-2] {s} p w mem)
(MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVWstore [i-2] {s} p w0 mem)
(MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVDstore [i-4] {s} p w mem)
(MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVDstore [i-4] {s} p w0 mem)
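
// For example, storing the second-lowest byte of w at offset 4 and the
// lowest byte at offset 5 is, on this big-endian target, a single 16-bit
// store of w's low half (illustrative):
//   (MOVBstore [5] {s} p w x:(MOVBstore [4] {s} p (SRDconst [8] w) mem))
//   => (MOVHstore [4] {s} p w mem)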

// Combine stores into larger (unaligned) stores with the bytes reversed (little endian).
// Store-with-bytes-reversed instructions do not support relative memory addresses,
// so these stores can't operate on global data (SB).
(MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHBRstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHBRstore [i-1] {s} p w0 mem)
(MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHBRstore [i-1] {s} p w mem)
(MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
  && p.Op != OpSB
  && x.Uses == 1
  && clobber(x)
  => (MOVHBRstore [i-1] {s} p w0 mem)
(MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
  && x.Uses == 1
  && clobber(x)
  => (MOVWBRstore [i-2] {s} p w mem)
(MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
  && x.Uses == 1
  && clobber(x)
  => (MOVWBRstore [i-2] {s} p w0 mem)
(MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
  && x.Uses == 1
  && clobber(x)
  => (MOVWBRstore [i-2] {s} p w mem)
(MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
  && x.Uses == 1
  && clobber(x)
  => (MOVWBRstore [i-2] {s} p w0 mem)
(MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
  && x.Uses == 1
  && clobber(x)
  => (MOVDBRstore [i-4] {s} p w mem)
(MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
  && x.Uses == 1
  && clobber(x)
  => (MOVDBRstore [i-4] {s} p w0 mem)
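
// For example, writing the low byte of w first and the next byte one
// address higher is little-endian order, which maps to a byte-reversed
// store (illustrative):
//   (MOVBstore [3] {s} p (SRDconst [8] w) x:(MOVBstore [2] {s} p w mem))
//   => (MOVHBRstore [2] {s} p w mem)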

(MOVBstore [7] {s} p1 (SRDconst w)
  x1:(MOVHBRstore [5] {s} p1 (SRDconst w)
  x2:(MOVWBRstore [1] {s} p1 (SRDconst w)
  x3:(MOVBstore [0] {s} p1 w mem))))
  && x1.Uses == 1
  && x2.Uses == 1
  && x3.Uses == 1
  && clobber(x1, x2, x3)
  => (MOVDBRstore {s} p1 w mem)
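
// The four stores above together write all eight bytes of w in
// little-endian order, so they combine into a single byte-reversed
// 8-byte store.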

// Combining byte loads into larger (unaligned) loads.

// Big-endian loads

(ORW x1:(MOVBZload [i1] {s} p mem)
    sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)))
  && i1 == i0+1
  && p.Op != OpSB
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)

(OR x1:(MOVBZload [i1] {s} p mem)
    sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)))
  && i1 == i0+1
  && p.Op != OpSB
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)

(ORW x1:(MOVHZload [i1] {s} p mem)
    sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)))
  && i1 == i0+2
  && p.Op != OpSB
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)

(OR x1:(MOVHZload [i1] {s} p mem)
    sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)))
  && i1 == i0+2
  && p.Op != OpSB
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)

(OR x1:(MOVWZload [i1] {s} p mem)
    sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)))
  && i1 == i0+4
  && p.Op != OpSB
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem)
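
// For example, Go code of the form (illustrative):
//
//   b := uint16(p[1]) | uint16(p[0])<<8
//
// matches the first ORW rule above and is rewritten to a single
// halfword load, since the target is big-endian.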

(ORW
  s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
  or:(ORW
    s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
    y))
  && i1 == i0+1
  && j1 == j0-8
  && j1 % 16 == 0
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && or.Uses == 1
  && mergePoint(b,x0,x1,y) != nil
  && clobber(x0, x1, s0, s1, or)
  => @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)

(OR
  s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
  or:(OR
    s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
    y))
  && i1 == i0+1
  && j1 == j0-8
  && j1 % 16 == 0
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && or.Uses == 1
  && mergePoint(b,x0,x1,y) != nil
  && clobber(x0, x1, s0, s1, or)
  => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)

(OR
  s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))
  or:(OR
    s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))
    y))
  && i1 == i0+2
  && j1 == j0-16
  && j1 % 32 == 0
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && or.Uses == 1
  && mergePoint(b,x0,x1,y) != nil
  && clobber(x0, x1, s0, s1, or)
  => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZload [i0] {s} p mem)) y)

// Little-endian loads

(ORW x0:(MOVBZload [i0] {s} p mem)
    sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)))
  && p.Op != OpSB
  && i1 == i0+1
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))

(OR x0:(MOVBZload [i0] {s} p mem)
    sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)))
  && p.Op != OpSB
  && i1 == i0+1
  && x0.Uses == 1
  && x1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, sh)
  => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))

(ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
    sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
  && i1 == i0+2
  && x0.Uses == 1
  && x1.Uses == 1
  && r0.Uses == 1
  && r1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, r0, r1, sh)
  => @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem)

(OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
    sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
  && i1 == i0+2
  && x0.Uses == 1
  && x1.Uses == 1
  && r0.Uses == 1
  && r1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, r0, r1, sh)
  => @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem))

(OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem))
    sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))))
  && i1 == i0+4
  && x0.Uses == 1
  && x1.Uses == 1
  && r0.Uses == 1
  && r1.Uses == 1
  && sh.Uses == 1
  && mergePoint(b,x0,x1) != nil
  && clobber(x0, x1, r0, r1, sh)
  => @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem)
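
// For example, Go code of the form (illustrative):
//
//   b := uint16(p[0]) | uint16(p[1])<<8
//
// reads a 16-bit little-endian value, which on this big-endian target
// becomes a byte-reversed load (MOVHBRload).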

(ORW
  s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
  or:(ORW
    s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
    y))
  && p.Op != OpSB
  && i1 == i0+1
  && j1 == j0+8
  && j0 % 16 == 0
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && or.Uses == 1
  && mergePoint(b,x0,x1,y) != nil
  && clobber(x0, x1, s0, s1, or)
  => @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)

(OR
  s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
  or:(OR
    s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
    y))
  && p.Op != OpSB
  && i1 == i0+1
  && j1 == j0+8
  && j0 % 16 == 0
  && x0.Uses == 1
  && x1.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && or.Uses == 1
  && mergePoint(b,x0,x1,y) != nil
  && clobber(x0, x1, s0, s1, or)
  => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)

(OR
  s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))
  or:(OR
    s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)))
    y))
  && i1 == i0+2
  && j1 == j0+16
  && j0 % 32 == 0
  && x0.Uses == 1
  && x1.Uses == 1
  && r0.Uses == 1
  && r1.Uses == 1
  && s0.Uses == 1
  && s1.Uses == 1
  && or.Uses == 1
  && mergePoint(b,x0,x1,y) != nil
  && clobber(x0, x1, r0, r1, s0, s1, or)
  => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y)

// Combine stores into store multiples.
// 32-bit
(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
  && p.Op != OpSB
  && x.Uses == 1
  && is20Bit(int64(i)-4)
  && clobber(x)
  => (STM2 [i-4] {s} p w0 w1 mem)
(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
  && x.Uses == 1
  && is20Bit(int64(i)-8)
  && clobber(x)
  => (STM3 [i-8] {s} p w0 w1 w2 mem)
(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
  && x.Uses == 1
  && is20Bit(int64(i)-12)
  && clobber(x)
  => (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
  && x.Uses == 1
  && is20Bit(int64(i)-8)
  && clobber(x)
  => (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
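
// For example, two adjacent 32-bit stores become one store-multiple
// (illustrative):
//   (MOVWstore [12] {s} p w1 x:(MOVWstore [8] {s} p w0 mem))
//   => (STM2 [8] {s} p w0 w1 mem)
// STM2 stores w0 at the base offset and w1 at the base offset plus 4.
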
// 64-bit
(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
  && p.Op != OpSB
  && x.Uses == 1
  && is20Bit(int64(i)-8)
  && clobber(x)
  => (STMG2 [i-8] {s} p w0 w1 mem)
(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
  && x.Uses == 1
  && is20Bit(int64(i)-16)
  && clobber(x)
  => (STMG3 [i-16] {s} p w0 w1 w2 mem)
(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
  && x.Uses == 1
  && is20Bit(int64(i)-24)
  && clobber(x)
  => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
  && x.Uses == 1
  && is20Bit(int64(i)-16)
  && clobber(x)
  => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)

// Convert 32-bit store multiples into 64-bit stores.
(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem)
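
// On this big-endian target, storing the high word of x at [i] and the
// low word of x at [i+4] is exactly a 64-bit store of x.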
1709
View as plain text