Text file src/cmd/compile/internal/ssa/gen/AMD64.rules

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
     7  (AddPtr ...) => (ADDQ ...)
     8  (Add(32|64)F ...) => (ADDS(S|D) ...)
     9  
    10  (Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
    11  (SubPtr ...) => (SUBQ ...)
    12  (Sub(32|64)F ...) => (SUBS(S|D) ...)
    13  
    14  (Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
    15  (Mul(32|64)F ...) => (MULS(S|D) ...)
    16  
    17  (Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
    18  (Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
    19  (Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
    20  
    21  (Hmul(64|32) ...) => (HMUL(Q|L) ...)
    22  (Hmul(64|32)u ...) => (HMUL(Q|L)U ...)
    23  
    24  (Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
    25  (Div8  x y) => (Select0 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
    26  (Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
    27  (Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
    28  (Div(32|64)F ...) => (DIVS(S|D) ...)
    29  
    30  (Select0 (Add64carry x y c)) =>
    31  	(Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
    32  (Select1 (Add64carry x y c)) =>
    33  	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
    34  (Select0 (Sub64borrow x y c)) =>
    35  	(Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
    36  (Select1 (Sub64borrow x y c)) =>
    37  	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
    38  
    39  // Optimize ADCQ and friends
    40  (ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
    41  (ADCQ x y (FlagEQ)) => (ADDQcarry x y)
    42  (ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
    43  (ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
    44  (SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
    45  (SBBQ x y (FlagEQ)) => (SUBQborrow x y)
    46  (SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
    47  (SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
    48  (Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
    49  (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x
    50  
    51  
    52  (Mul64uhilo ...) => (MULQU2 ...)
    53  (Div128u ...) => (DIVQU2 ...)
    54  
    55  (Avg64u ...) => (AVGQU ...)
    56  
    57  (Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
    58  (Mod8  x y) => (Select1 (DIVW  (SignExt8to16 x) (SignExt8to16 y)))
    59  (Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
    60  (Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
    61  
    62  (And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
    63  (Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
    64  (Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
    65  (Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)
    66  
    67  (Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
    68  (Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
    69  (Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
    70  
    71  // Lowering boolean ops
    72  (AndB ...) => (ANDL ...)
    73  (OrB ...) => (ORL ...)
    74  (Not x) => (XORLconst [1] x)
    75  
    76  // Lowering pointer arithmetic
    77  (OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
    78  (OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)
    79  
    80  // Lowering other arithmetic
    81  (Ctz64 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
    82  (Ctz32 x)     && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
    83  (Ctz64 <t> x) && buildcfg.GOAMD64 <  3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
    84  (Ctz32 x)     && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
    85  (Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x))
    86  (Ctz8  x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x))
    87  
    88  (Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
    89  (Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
    90  (Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
    91  (Ctz8NonZero  x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
    92  (Ctz64NonZero x) && buildcfg.GOAMD64 <  3 => (Select0 (BSFQ x))
    93  (Ctz32NonZero x) && buildcfg.GOAMD64 <  3 => (BSFL x)
    94  (Ctz16NonZero x) && buildcfg.GOAMD64 <  3 => (BSFL x)
    95  (Ctz8NonZero  x) && buildcfg.GOAMD64 <  3 => (BSFL x)
    96  
    97  // BitLen64 of a 64-bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
    98  // However, for zero-extended values, we can cheat a bit, and calculate
    99  // BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
   100  // places the index of the highest set bit where we want it.
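        // For example (illustration only), the zero-extended 8-bit value x = 0b0001_0110 has BitLen8(x) = 5;
        // x<<1 + 1 = 0b0010_1101, so BSR returns bit index 5 directly and no zero check is needed.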
   101  (BitLen64 <t> x) => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
   102  (BitLen32 x) => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
   103  (BitLen16 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
   104  (BitLen8  x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
   105  
   106  (Bswap(64|32) ...) => (BSWAP(Q|L) ...)
   107  
   108  (PopCount(64|32) ...) => (POPCNT(Q|L) ...)
   109  (PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
   110  (PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))
   111  
   112  (Sqrt ...) => (SQRTSD ...)
   113  (Sqrt32 ...) => (SQRTSS ...)
   114  
   115  (RoundToEven x) => (ROUNDSD [0] x)
   116  (Floor x)       => (ROUNDSD [1] x)
   117  (Ceil x)        => (ROUNDSD [2] x)
   118  (Trunc x)       => (ROUNDSD [3] x)
   119  
   120  (FMA x y z) => (VFMADD231SD z x y)
   121  
   122  // Lowering extension
   123  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
   124  (SignExt8to16  ...) => (MOVBQSX ...)
   125  (SignExt8to32  ...) => (MOVBQSX ...)
   126  (SignExt8to64  ...) => (MOVBQSX ...)
   127  (SignExt16to32 ...) => (MOVWQSX ...)
   128  (SignExt16to64 ...) => (MOVWQSX ...)
   129  (SignExt32to64 ...) => (MOVLQSX ...)
   130  
   131  (ZeroExt8to16  ...) => (MOVBQZX ...)
   132  (ZeroExt8to32  ...) => (MOVBQZX ...)
   133  (ZeroExt8to64  ...) => (MOVBQZX ...)
   134  (ZeroExt16to32 ...) => (MOVWQZX ...)
   135  (ZeroExt16to64 ...) => (MOVWQZX ...)
   136  (ZeroExt32to64 ...) => (MOVLQZX ...)
   137  
   138  (Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
   139  
   140  (SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
   141  (SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
   142  
   143  // Lowering truncation
   144  // Because we ignore the high parts of registers, truncates are just copies.
   145  (Trunc16to8  ...) => (Copy ...)
   146  (Trunc32to8  ...) => (Copy ...)
   147  (Trunc32to16 ...) => (Copy ...)
   148  (Trunc64to8  ...) => (Copy ...)
   149  (Trunc64to16 ...) => (Copy ...)
   150  (Trunc64to32 ...) => (Copy ...)
   151  
   152  // Lowering float <-> int
   153  (Cvt32to32F ...) => (CVTSL2SS ...)
   154  (Cvt32to64F ...) => (CVTSL2SD ...)
   155  (Cvt64to32F ...) => (CVTSQ2SS ...)
   156  (Cvt64to64F ...) => (CVTSQ2SD ...)
   157  
   158  (Cvt32Fto32 ...) => (CVTTSS2SL ...)
   159  (Cvt32Fto64 ...) => (CVTTSS2SQ ...)
   160  (Cvt64Fto32 ...) => (CVTTSD2SL ...)
   161  (Cvt64Fto64 ...) => (CVTTSD2SQ ...)
   162  
   163  (Cvt32Fto64F ...) => (CVTSS2SD ...)
   164  (Cvt64Fto32F ...) => (CVTSD2SS ...)
   165  
   166  (Round(32|64)F ...) => (Copy ...)
   167  
   168  (CvtBoolToUint8 ...) => (Copy ...)
   169  
   170  // Lowering shifts
   171  // Unsigned shifts need to return 0 if the shift amount is >= the width of the shifted value.
   172  //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
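        // SBB(Q|L)carrymask below materializes that mask from the flags: it is all ones when the
        // CMPconst finds the shift amount below the width (carry set) and zero otherwise, so the
        // AND zeroes the result exactly when the shift amount is out of range.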
   173  (Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
   174  (Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
   175  (Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
   176  (Lsh8x(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
   177  
   178  (Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
   179  (Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
   180  (Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
   181  (Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SHLL x y)
   182  
   183  (Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
   184  (Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
   185  (Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
   186  (Rsh8Ux(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
   187  
   188  (Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
   189  (Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
   190  (Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
   191  (Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SHRB x y)
   192  
   193  // Signed right shift needs to return 0/-1 if the shift amount is >= the width of the shifted value.
   194  // We implement this by setting the shift amount to -1 (all ones) if the shift amount is >= the width.
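        // Concretely: when the shift amount is in range, the carrymask below is all ones, its NOT is
        // zero, and the OR leaves y unchanged; when it is out of range, y is ORed with all ones, so
        // the arithmetic shift uses the maximum amount and fills the result with copies of the sign bit.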
   195  (Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
   196  (Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
   197  (Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
   198  (Rsh8x(64|32|16|8)  <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
   199  
   200  (Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
   201  (Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
   202  (Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
   203  (Rsh8x(64|32|16|8) x y)  && shiftIsBounded(v) => (SARB x y)
   204  
   205  // Lowering integer comparisons
   206  (Less(64|32|16|8)      x y) => (SETL  (CMP(Q|L|W|B)     x y))
   207  (Less(64|32|16|8)U     x y) => (SETB  (CMP(Q|L|W|B)     x y))
   208  (Leq(64|32|16|8)       x y) => (SETLE (CMP(Q|L|W|B)     x y))
   209  (Leq(64|32|16|8)U      x y) => (SETBE (CMP(Q|L|W|B)     x y))
   210  (Eq(Ptr|64|32|16|8|B)  x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
   211  (Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))
   212  
   213  // Lowering floating point comparisons
   214  // Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here,
   215  // and the operands are reversed when generating the assembly.
   216  (Eq(32|64)F   x y) => (SETEQF (UCOMIS(S|D) x y))
   217  (Neq(32|64)F  x y) => (SETNEF (UCOMIS(S|D) x y))
   218  // Use SETGF/SETGEF with reversed operands to dodge NaN case.
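        // Reversing the operands turns x < y into y > x; with UCOMIS, an unordered (NaN) result sets
        // ZF, PF and CF, so the unsigned "above"-style conditions that SETGF/SETGEF encode come out
        // false, which is the required answer for any comparison involving a NaN.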
   219  (Less(32|64)F x y) => (SETGF  (UCOMIS(S|D) y x))
   220  (Leq(32|64)F  x y) => (SETGEF (UCOMIS(S|D) y x))
   221  
   222  // Lowering loads
   223  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
   224  (Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
   225  (Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
   226  (Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
   227  (Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
   228  (Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
   229  
   230  // Lowering stores
   231  // These more-specific FP versions of the Store pattern should come first.
   232  (Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem)
   233  (Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem)
   234  
   235  (Store {t} ptr val mem) && t.Size() == 8 => (MOVQstore ptr val mem)
   236  (Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem)
   237  (Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
   238  (Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   239  
   240  // Lowering moves
   241  (Move [0] _ _ mem) => mem
   242  (Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
   243  (Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
   244  (Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
   245  (Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
   246  (Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem)
   247  (Move [16] dst src mem) && !config.useSSE =>
   248  	(MOVQstore [8] dst (MOVQload [8] src mem)
   249  		(MOVQstore dst (MOVQload src mem) mem))
   250  
   251  (Move [32] dst src mem) =>
   252  	(Move [16]
   253  		(OffPtr <dst.Type> dst [16])
   254  		(OffPtr <src.Type> src [16])
   255  		(Move [16] dst src mem))
   256  
   257  (Move [48] dst src mem) && config.useSSE =>
   258  	(Move [32]
   259  		(OffPtr <dst.Type> dst [16])
   260  		(OffPtr <src.Type> src [16])
   261  		(Move [16] dst src mem))
   262  
   263  (Move [64] dst src mem) && config.useSSE =>
   264  	(Move [32]
   265  		(OffPtr <dst.Type> dst [32])
   266  		(OffPtr <src.Type> src [32])
   267  		(Move [32] dst src mem))
   268  
   269  (Move [3] dst src mem) =>
   270  	(MOVBstore [2] dst (MOVBload [2] src mem)
   271  		(MOVWstore dst (MOVWload src mem) mem))
   272  (Move [5] dst src mem) =>
   273  	(MOVBstore [4] dst (MOVBload [4] src mem)
   274  		(MOVLstore dst (MOVLload src mem) mem))
   275  (Move [6] dst src mem) =>
   276  	(MOVWstore [4] dst (MOVWload [4] src mem)
   277  		(MOVLstore dst (MOVLload src mem) mem))
   278  (Move [7] dst src mem) =>
   279  	(MOVLstore [3] dst (MOVLload [3] src mem)
   280  		(MOVLstore dst (MOVLload src mem) mem))
   281  (Move [9] dst src mem) =>
   282  	(MOVBstore [8] dst (MOVBload [8] src mem)
   283  		(MOVQstore dst (MOVQload src mem) mem))
   284  (Move [10] dst src mem) =>
   285  	(MOVWstore [8] dst (MOVWload [8] src mem)
   286  		(MOVQstore dst (MOVQload src mem) mem))
   287  (Move [12] dst src mem) =>
   288  	(MOVLstore [8] dst (MOVLload [8] src mem)
   289  		(MOVQstore dst (MOVQload src mem) mem))
   290  (Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 =>
   291  	(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
   292  		(MOVQstore dst (MOVQload src mem) mem))
   293  
   294  // Adjust moves to be a multiple of 16 bytes.
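        // For example, Move [23] has s%16 == 7, so the first rule below copies bytes [0,8) with a
        // MOVQ load/store pair and then issues Move [16] at offset 7; the overlapping byte at
        // offset 7 is simply written twice with the same value.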
   295  (Move [s] dst src mem)
   296  	&& s > 16 && s%16 != 0 && s%16 <= 8 =>
   297  	(Move [s-s%16]
   298  		(OffPtr <dst.Type> dst [s%16])
   299  		(OffPtr <src.Type> src [s%16])
   300  		(MOVQstore dst (MOVQload src mem) mem))
   301  (Move [s] dst src mem)
   302  	&& s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE =>
   303  	(Move [s-s%16]
   304  		(OffPtr <dst.Type> dst [s%16])
   305  		(OffPtr <src.Type> src [s%16])
   306  		(MOVOstore dst (MOVOload src mem) mem))
   307  (Move [s] dst src mem)
   308  	&& s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE =>
   309  	(Move [s-s%16]
   310  		(OffPtr <dst.Type> dst [s%16])
   311  		(OffPtr <src.Type> src [s%16])
   312  		(MOVQstore [8] dst (MOVQload [8] src mem)
   313  			(MOVQstore dst (MOVQload src mem) mem)))
   314  
   315  // Medium copying uses Duff's device.
   316  (Move [s] dst src mem)
   317  	&& s > 64 && s <= 16*64 && s%16 == 0
   318  	&& !config.noDuffDevice && logLargeCopy(v, s) =>
   319  	(DUFFCOPY [s] dst src mem)
   320  
   321  // Large copying uses REP MOVSQ.
   322  (Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) =>
   323  	(REPMOVSQ dst src (MOVQconst [s/8]) mem)
   324  
   325  // Lowering Zero instructions
   326  (Zero [0] _ mem) => mem
   327  (Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
   328  (Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
   329  (Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
   330  (Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
   331  
   332  (Zero [3] destptr mem) =>
   333  	(MOVBstoreconst [makeValAndOff(0,2)] destptr
   334  		(MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
   335  (Zero [5] destptr mem) =>
   336  	(MOVBstoreconst [makeValAndOff(0,4)] destptr
   337  		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
   338  (Zero [6] destptr mem) =>
   339  	(MOVWstoreconst [makeValAndOff(0,4)] destptr
   340  		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
   341  (Zero [7] destptr mem) =>
   342  	(MOVLstoreconst [makeValAndOff(0,3)] destptr
   343  		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
   344  
   345  // Strip off any fractional word zeroing.
   346  (Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE =>
   347  	(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
   348  		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
   349  
   350  // Zero small numbers of words directly.
   351  (Zero [16] destptr mem) && !config.useSSE =>
   352  	(MOVQstoreconst [makeValAndOff(0,8)] destptr
   353  		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
   354  (Zero [24] destptr mem) && !config.useSSE =>
   355  	(MOVQstoreconst [makeValAndOff(0,16)] destptr
   356  		(MOVQstoreconst [makeValAndOff(0,8)] destptr
   357  			(MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
   358  (Zero [32] destptr mem) && !config.useSSE =>
   359  	(MOVQstoreconst [makeValAndOff(0,24)] destptr
   360  		(MOVQstoreconst [makeValAndOff(0,16)] destptr
   361  			(MOVQstoreconst [makeValAndOff(0,8)] destptr
   362  				(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
   363  
   364  (Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
   365  	(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
   366  		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
   367  
   368  // Adjust zeros to be a multiple of 16 bytes.
   369  (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
   370  	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
   371  		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
   372  
   373  (Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
   374  	(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
   375  		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
   376  
   377  (Zero [16] destptr mem) && config.useSSE =>
   378  	(MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
   379  (Zero [32] destptr mem) && config.useSSE =>
   380  	(MOVOstoreconst [makeValAndOff(0,16)] destptr
   381  		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
   382  (Zero [48] destptr mem) && config.useSSE =>
   383  	(MOVOstoreconst [makeValAndOff(0,32)] destptr
   384  		(MOVOstoreconst [makeValAndOff(0,16)] destptr
   385  			(MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
   386  (Zero [64] destptr mem) && config.useSSE =>
   387  	(MOVOstoreconst [makeValAndOff(0,48)] destptr
   388  		(MOVOstoreconst [makeValAndOff(0,32)] destptr
   389  			(MOVOstoreconst [makeValAndOff(0,16)] destptr
   390  				(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
   391  
   392  // Medium zeroing uses Duff's device.
   393  (Zero [s] destptr mem)
   394  	&& s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
   395  	(DUFFZERO [s] destptr mem)
   396  
   397  // Large zeroing uses REP STOSQ.
   398  (Zero [s] destptr mem)
   399  	&& (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
   400  	&& s%8 == 0 =>
   401  	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
   402  
   403  // Lowering constants
   404  (Const8   [c]) => (MOVLconst [int32(c)])
   405  (Const16  [c]) => (MOVLconst [int32(c)])
   406  (Const32  ...) => (MOVLconst ...)
   407  (Const64  ...) => (MOVQconst ...)
   408  (Const32F ...) => (MOVSSconst ...)
   409  (Const64F ...) => (MOVSDconst ...)
   410  (ConstNil    ) => (MOVQconst [0])
   411  (ConstBool [c]) => (MOVLconst [b2i32(c)])
   412  
   413  // Lowering calls
   414  (StaticCall ...) => (CALLstatic ...)
   415  (ClosureCall ...) => (CALLclosure ...)
   416  (InterCall ...) => (CALLinter ...)
   417  (TailCall ...) => (CALLtail ...)
   418  
   419  // Lowering conditional moves
   420  // If the condition is a SETxx, we can just run a CMOV from the comparison that
   421  // set the flags.
   422  // Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
   423  (CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
   424      => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
   425  (CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
   426      => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
   427  (CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
   428      => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
   429  
   430  // If the condition does not set the flags, we need to generate a comparison.
   431  (CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
   432      => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
   433  (CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
   434      => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
   435  (CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
   436      => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
   437  
   438  (CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
   439      => (CMOVQNE y x (CMPQconst [0] check))
   440  (CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
   441      => (CMOVLNE y x (CMPQconst [0] check))
   442  (CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
   443      => (CMOVWNE y x (CMPQconst [0] check))
   444  
   445  // Absorb InvertFlags
   446  (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
   447      => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
   448  (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
   449      => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
   450  (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
   451      => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
   452  
   453  // Absorb constants generated during lowering
   454  (CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
   455  (CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
   456  (CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
   457  (CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
   458  (CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
   459  (CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
   460  (CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
   461  (CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
   462  (CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
   463  (CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y
   464  
   465  // Miscellaneous
   466  (IsNonNil p) => (SETNE (TESTQ p p))
   467  (IsInBounds idx len) => (SETB (CMPQ idx len))
   468  (IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
   469  (NilCheck ...) => (LoweredNilCheck ...)
   470  (GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
   471  (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   472  (GetCallerPC ...) => (LoweredGetCallerPC ...)
   473  (GetCallerSP ...) => (LoweredGetCallerSP ...)
   474  
   475  (HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
   476  (Addr {sym} base) => (LEAQ {sym} base)
   477  (LocalAddr {sym} base _) => (LEAQ {sym} base)
   478  
   479  (MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
   480  (MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
   481  (MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
   482  (MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
   483  (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
   484  (MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
   485  (MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
   486  (MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
   487  (MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
   488  (MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)
   489  
   490  // block rewrites
   491  (If (SETL  cmp) yes no) => (LT  cmp yes no)
   492  (If (SETLE cmp) yes no) => (LE  cmp yes no)
   493  (If (SETG  cmp) yes no) => (GT  cmp yes no)
   494  (If (SETGE cmp) yes no) => (GE  cmp yes no)
   495  (If (SETEQ cmp) yes no) => (EQ  cmp yes no)
   496  (If (SETNE cmp) yes no) => (NE  cmp yes no)
   497  (If (SETB  cmp) yes no) => (ULT cmp yes no)
   498  (If (SETBE cmp) yes no) => (ULE cmp yes no)
   499  (If (SETA  cmp) yes no) => (UGT cmp yes no)
   500  (If (SETAE cmp) yes no) => (UGE cmp yes no)
   501  (If (SETO cmp) yes no) => (OS cmp yes no)
   502  
   503  // Special case for floating point - LF/LEF not generated
   504  (If (SETGF  cmp) yes no) => (UGT  cmp yes no)
   505  (If (SETGEF cmp) yes no) => (UGE  cmp yes no)
   506  (If (SETEQF cmp) yes no) => (EQF  cmp yes no)
   507  (If (SETNEF cmp) yes no) => (NEF  cmp yes no)
   508  
   509  (If cond yes no) => (NE (TESTB cond cond) yes no)
   510  
   511  // Atomic loads.  Other than preserving their ordering with respect to other loads, nothing special here.
   512  (AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
   513  (AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
   514  (AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
   515  (AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)
   516  
   517  // Atomic stores.  We use XCHG to prevent the hardware reordering a subsequent load.
   518  // TODO: most runtime uses of atomic stores don't need that property.  Use normal stores for those?
   519  (AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
   520  (AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
   521  (AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
   522  (AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
   523  
   524  // Atomic exchanges.
   525  (AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
   526  (AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)
   527  
   528  // Atomic adds.
   529  (AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
   530  (AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
   531  (Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
   532  (Select1     (AddTupleFirst32   _ tuple)) => (Select1 tuple)
   533  (Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
   534  (Select1     (AddTupleFirst64   _ tuple)) => (Select1 tuple)
   535  
   536  // Atomic compare and swap.
   537  (AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
   538  (AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)
   539  
   540  // Atomic memory updates.
   541  (AtomicAnd8  ptr val mem) => (ANDBlock ptr val mem)
   542  (AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
   543  (AtomicOr8   ptr val mem) => (ORBlock  ptr val mem)
   544  (AtomicOr32  ptr val mem) => (ORLlock  ptr val mem)
   545  
   546  // Write barrier.
   547  (WB ...) => (LoweredWB ...)
   548  
   549  (PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
   550  (PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
   551  (PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
   552  
   553  // ***************************
   554  // Above: lowering rules
   555  // Below: optimizations
   556  // ***************************
   557  // TODO: Should the optimizations be a separate pass?
   558  
   559  // Fold boolean tests into blocks
   560  (NE (TESTB (SETL  cmp) (SETL  cmp)) yes no) => (LT  cmp yes no)
   561  (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE  cmp yes no)
   562  (NE (TESTB (SETG  cmp) (SETG  cmp)) yes no) => (GT  cmp yes no)
   563  (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE  cmp yes no)
   564  (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ  cmp yes no)
   565  (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE  cmp yes no)
   566  (NE (TESTB (SETB  cmp) (SETB  cmp)) yes no) => (ULT cmp yes no)
   567  (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
   568  (NE (TESTB (SETA  cmp) (SETA  cmp)) yes no) => (UGT cmp yes no)
   569  (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
   570  (NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
   571  
   572  // Unsigned comparisons to 0/1
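        // TEST always clears the carry flag, so after (TEST x x) an unsigned "below" branch can
        // never be taken and an unsigned "above or equal" branch is always taken.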
   573  (ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
   574  (UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
   575  (SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
   576  (SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])
   577  
   578  // x & 1 != 0 -> x & 1
   579  (SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
   580  (SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
   581  
   582  // Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
   583  // Note that BTx instructions report their result in the carry flag, so we need to convert
   584  // tests of the zero flag into tests of the carry flag.
   585  // ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
   586  // mutandis, for UGE and SETAE, and CC and SETCC.
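        // For example, Go source along the lines of
        //     if a&(1<<b) != 0 { ... }    // with b suitably bounded
        // reaches us as (NE (TESTQ (SHLQ (MOVQconst [1]) b) a)) and becomes (ULT (BTQ b a)),
        // i.e. a bit-test instruction followed by a branch on the carry flag.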
   587  ((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
   588  ((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
   589  ((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
   590      => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
   591  ((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
   592      => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
   593  ((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
   594      => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
   595  (SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE)  (BTL x y))
   596  (SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE)  (BTQ x y))
   597  (SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
   598      => (SET(B|AE)  (BTLconst [int8(log32(c))] x))
   599  (SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
   600      => (SET(B|AE)  (BTQconst [int8(log32(c))] x))
   601  (SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
   602      => (SET(B|AE)  (BTQconst [int8(log64(c))] x))
   603  // SET..store variant
   604  (SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
   605      => (SET(B|AE)store  [off] {sym} ptr (BTL x y) mem)
   606  (SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
   607      => (SET(B|AE)store  [off] {sym} ptr (BTQ x y) mem)
   608  (SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
   609      => (SET(B|AE)store  [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
   610  (SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
   611      => (SET(B|AE)store  [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
   612  (SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
   613      => (SET(B|AE)store  [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
   614  
   615  // Handle bit-testing in the form (a>>b)&1 != 0 by building on the above rules
   616  // and further combining shifts.
   617  (BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
   618  (BT(Q|L)const [c] (SHLQconst [d] x)) && c>d      => (BT(Q|L)const [c-d] x)
   619  (BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
   620  (BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
   621  (BTLconst [c] (SHLLconst [d] x)) && c>d      => (BTLconst [c-d] x)
   622  (BTLconst [0] s:(SHRL x y)) => (BTL y x)
   623  
   624  // Rewrite a & 1 != 1 into a & 1 == 0.
   625  // Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
   626  (SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
   627  (SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
   628  (SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
   629  (SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
   630  
   631  // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
   632  (OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
   633  (XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
   634  
   635  // Convert ORconst into BTS when the resulting code is smaller; the cutoff is a constant of 128
   636  // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
   637  ((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
   638      => (BT(S|C)Qconst [int8(log32(c))] x)
   639  ((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
   640      => (BT(S|C)Lconst [int8(log32(c))] x)
   641  ((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
   642      => (BT(S|C)Qconst [int8(log64(c))] x)
   643  ((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
   644      => (BT(S|C)Lconst [int8(log32(c))] x)
   645  
   646  // Recognize bit clearing: a &^= 1<<b
   647  (AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
   648  (ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
   649  (ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
   650      => (BTRQconst [int8(log32(^c))] x)
   651  (ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
   652      => (BTRLconst [int8(log32(^c))] x)
   653  (ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
   654      => (BTRQconst [int8(log64(^c))] x)
   655  (ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
   656      => (BTRLconst [int8(log32(^c))] x)
   657  
   658  // Special-case bit patterns on first/last bit.
   659  // generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
   660  // for instance:
   661  //    x & 0xFFFF0000 -> (x >> 16) << 16
   662  //    x & 0x80000000 -> (x >> 31) << 31
   663  //
   664  // When the mask is just one bit (as in the second example above), this conflicts
   665  // with the above rules for detecting bit-testing / bit-clearing of the first/last bit.
   666  // We thus special-case them, by detecting the shift patterns.
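        // For example, after generic.rules a test of x & (1<<63) arrives here as
        // (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) with z1 == z2, which the rules
        // below turn back into a single (BTQconst [63] x).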
   667  
   668  // Special case resetting first/last bit
   669  (SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
   670  	=> (BTR(L|Q)const [0] x)
   671  (SHRLconst [1] (SHLLconst [1] x))
   672  	=> (BTRLconst [31] x)
   673  (SHRQconst [1] (SHLQconst [1] x))
   674  	=> (BTRQconst [63] x)
   675  
   676  // Special case testing first/last bit (with double-shift generated by generic.rules)
   677  ((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
   678      => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
   679  ((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
   680      => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
   681  (SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
   682      => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
   683  (SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
   684      => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
   685  
   686  ((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
   687      => ((SETB|SETAE|ULT|UGE)  (BTQconst [0] x))
   688  ((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
   689      => ((SETB|SETAE|ULT|UGE)  (BTLconst [0] x))
   690  (SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
   691      => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
   692  (SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
   693      => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
   694  
   695  // Special-case manually testing last bit with "a>>63 != 0" (without "&1")
   696  ((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
   697      => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
   698  ((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
   699      => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
   700  (SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
   701      => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
   702  (SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
   703      => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
   704  
   705  // Fold combinations of bit ops on the same bit. An example is math.Copysign(c, -1):
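        // it clears and then sets bit 63, reaching us roughly as (BTSQconst [63] (BTRQconst [63] x)),
        // which the first rule below folds to a single (BTSQconst [63] x).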
   706  (BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
   707  (BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
   708  (BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
   709  (BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
   710  
   711  // Fold boolean negation into SETcc.
   712  (XORLconst [1] (SETNE x)) => (SETEQ x)
   713  (XORLconst [1] (SETEQ x)) => (SETNE x)
   714  (XORLconst [1] (SETL  x)) => (SETGE x)
   715  (XORLconst [1] (SETGE x)) => (SETL  x)
   716  (XORLconst [1] (SETLE x)) => (SETG  x)
   717  (XORLconst [1] (SETG  x)) => (SETLE x)
   718  (XORLconst [1] (SETB  x)) => (SETAE x)
   719  (XORLconst [1] (SETAE x)) => (SETB  x)
   720  (XORLconst [1] (SETBE x)) => (SETA  x)
   721  (XORLconst [1] (SETA  x)) => (SETBE x)
   722  
   723  // Special case for floating point - LF/LEF not generated
   724  (NE (TESTB (SETGF  cmp) (SETGF  cmp)) yes no) => (UGT  cmp yes no)
   725  (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE  cmp yes no)
   726  (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF  cmp yes no)
   727  (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF  cmp yes no)
   728  
   729  // Disabled because it interferes with the pattern match above and makes worse code.
   730  // (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
   731  // (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
   732  
   733  // fold constants into instructions
   734  (ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x)
   735  (ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
   736  (ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
   737  
   738  (SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
   739  (SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
   740  (SUBL x (MOVLconst [c])) => (SUBLconst x [c])
   741  (SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
   742  
   743  (MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
   744  (MULL x (MOVLconst [c])) => (MULLconst [c] x)
   745  
   746  (ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
   747  (ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
   748  
   749  (AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
   750  (XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
   751  (OR(L|Q)const  [c] (OR(L|Q)const  [d] x)) => (OR(L|Q)const  [c | d] x)
   752  
   753  (BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x)
   754  (ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x)
   755  (BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
   756  
   757  (BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x)
   758  (XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x)
   759  (BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
   760  
   761  (BTSLconst [c] (ORLconst  [d] x)) => (ORLconst [d | 1<<uint32(c)] x)
   762  (ORLconst  [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x)
   763  (BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
   764  
   765  (BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c)))     => (ANDQconst [d &^ (1<<uint32(c))] x)
   766  (ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d)))     => (ANDQconst [c &^ (1<<uint32(d))] x)
   767  (BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
   768  
   769  (BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c))     => (XORQconst [d ^ 1<<uint32(c)] x)
   770  (XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d))     => (XORQconst [c ^ 1<<uint32(d)] x)
   771  (BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
   772  
   773  (BTSQconst [c] (ORQconst  [d] x)) && is32Bit(int64(d) | 1<<uint32(c))     => (ORQconst [d | 1<<uint32(c)] x)
   774  (ORQconst  [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d))     => (ORQconst [c | 1<<uint32(d)] x)
   775  (BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
   776  
   777  
   778  (MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
   779  (MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)
   780  
   781  (ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
   782  (ORQ x (MOVLconst [c])) => (ORQconst [c] x)
   783  (ORL x (MOVLconst [c])) => (ORLconst [c] x)
   784  
   785  (XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
   786  (XORL x (MOVLconst [c])) => (XORLconst [c] x)
   787  
   788  (SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
   789  (SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
   790  
   791  (SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
   792  (SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
   793  (SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
   794  (SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
   795  (SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
   796  (SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])
   797  
   798  (SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
   799  (SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
   800  (SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
   801  (SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)
   802  
   803  
   804  // Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
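        // For example, (SHLQ x (ADDQconst [64] y)) shifts by (y+64) & 63 == y & 63, so the constant
        // add can be dropped; similarly, an ANDconst whose mask keeps all of the low bits is a no-op.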
   805  ((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x y)
   806  ((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
   807  ((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
   808  ((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
   809  
   810  ((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0  => ((SHLL|SHRL|SARL) x y)
   811  ((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0  => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
   812  ((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
   813  ((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
   814  
   815  ((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x y)
   816  ((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0  => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
   817  ((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
   818  ((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
   819  
   820  ((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0  => ((SHLL|SHRL|SARL) x y)
   821  ((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0  => ((SHLL|SHRL|SARL) x (NEGL <t> y))
   822  ((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
   823  ((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
   824  
   825  // Constant rotate instructions
   826  ((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c])
   827  ((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c])
   828  
   829  ((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 => (ROLWconst x [c])
   830  ((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c  && c < 8  && t.Size() == 1 => (ROLBconst x [c])
   831  
   832  (ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x)
   833  (ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
   834  (ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
   835  (ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)
   836  
   837  (RotateLeft8  ...) => (ROLB ...)
   838  (RotateLeft16 ...) => (ROLW ...)
   839  (RotateLeft32 ...) => (ROLL ...)
   840  (RotateLeft64 ...) => (ROLQ ...)
   841  
   842  // Non-constant rotates.
   843  // We want to issue a rotate when the Go source contains code like
   844  //     y &= 63
   845  //     x << y | x >> (64-y)
   846  // The shift rules above convert << to SHLx and >> to SHRx.
   847  // SHRx converts its shift argument from 64-y to -y.
   848  // A tricky situation occurs when y==0. Then the original code would be:
   849  //     x << 0 | x >> 64
   850  // But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
   851  // to force the second term to 0. We don't need that mask, but we must match
   852  // it in order to strip it out.
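        // (Replacing 64-y with -y is safe because the two agree modulo 64, which is all the hardware
        // shift count uses; the extra mask is the SBBcarrymask subtree matched below only to be discarded.)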
   853  (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
   854  (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)
   855  
   856  (ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
   857  (ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)
   858  
   859  // Help with rotate detection
   860  (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT)
   861  (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst  [7] _))) [32]) => (FlagLT_ULT)
   862  
   863  (ORL (SHLL x (AND(Q|L)const y [15]))
   864       (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
   865             (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
   866    && v.Type.Size() == 2
   867    => (ROLW x y)
   868  (ORL (SHRW x (AND(Q|L)const y [15]))
   869       (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
   870    && v.Type.Size() == 2
   871    => (RORW x y)
   872  
   873  (ORL (SHLL x (AND(Q|L)const y [ 7]))
   874       (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
   875             (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
   876    && v.Type.Size() == 1
   877    => (ROLB x y)
   878  (ORL (SHRB x (AND(Q|L)const y [ 7]))
   879       (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
   880    && v.Type.Size() == 1
   881    => (RORB x y)
   882  
   883  // rotate left negative = rotate right
   884  (ROLQ x (NEG(Q|L) y)) => (RORQ x y)
   885  (ROLL x (NEG(Q|L) y)) => (RORL x y)
   886  (ROLW x (NEG(Q|L) y)) => (RORW x y)
   887  (ROLB x (NEG(Q|L) y)) => (RORB x y)
   888  
   889  // rotate right negative = rotate left
   890  (RORQ x (NEG(Q|L) y)) => (ROLQ x y)
   891  (RORL x (NEG(Q|L) y)) => (ROLL x y)
   892  (RORW x (NEG(Q|L) y)) => (ROLW x y)
   893  (RORB x (NEG(Q|L) y)) => (ROLB x y)
   894  
   895  // rotate by constants
   896  (ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
   897  (ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
   898  (ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
   899  (ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)
   900  
   901  (RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
   902  (RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
   903  (RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
   904  (RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)
   905  
   906  // Constant shift simplifications
   907  ((SHLQ|SHRQ|SARQ)const      x [0]) => x
   908  ((SHLL|SHRL|SARL)const      x [0]) => x
   909  ((SHRW|SARW)const           x [0]) => x
   910  ((SHRB|SARB)const           x [0]) => x
   911  ((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x
   912  
   913  // Multi-register shifts
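        // For example, a double-width shift such as lo>>s | hi<<(64-s) (with 64-s expressed as
        // (NEGQ s), as in the rotate rules above) becomes a single SHRDQ instruction.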
   914  (ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
   915  
   916  // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
   917  // because the x86 instructions are defined to use all 5 bits of the shift even
   918  // for the small shifts. I don't think we'll ever generate a weird shift (e.g.
   919  // (SHRW x (MOVLconst [24]))), but just in case.
   920  
   921  (CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
   922  (CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
   923  (CMPL x (MOVLconst [c])) => (CMPLconst x [c])
   924  (CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
   925  (CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
   926  (CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
   927  (CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
   928  (CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
   929  
   930  // Canonicalize the order of arguments to comparisons - helps with CSE.
   931  (CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
   932  
   933  // Using MOVZX instead of AND is cheaper.
   934  (AND(Q|L)const [  0xFF] x) => (MOVBQZX x)
   935  (AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
   936  // This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
   937  // It is commented out for now; it also can't trigger, because the is32Bit guard on the
   938  // ANDQconst lowering rule above prevents 0xFFFFFFFF from matching (for the same reason).
   939  // Using an alternate form of this rule segfaults some binaries because of
   940  // adverse interactions with other passes.
   941  // (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)
   942  
   943  // strength reduction
   944  // Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
   945  //    1 - addq, shlq, leaq, negq, subq
   946  //    3 - imulq
   947  // This limits the rewrites to two instructions.
   948  // Note that negq always operates in-place,
   949  // which can require a register-register move
   950  // to preserve the original value,
   951  // so it must be used with care.
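        // For example, the [11] rule below computes 11*x as x + 2*(x + 4*x): one LEAQ4 and one
        // LEAQ2, i.e. two cost-1 instructions instead of a cost-3 imulq.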
   952  (MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x))
   953  (MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x))
   954  (MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x))
   955  (MUL(Q|L)const [-1] x) => (NEG(Q|L) x)
   956  (MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
   957  (MUL(Q|L)const [ 1] x) => x
   958  (MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x)
   959  (MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x)
   960  (MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 <v.Type> x x))
   961  (MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x)
   962  (MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 <v.Type> x x))
   963  (MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 <v.Type> x x))
   964  (MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 <v.Type> x x))
   965  (MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 <v.Type> x x))
   966  (MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 <v.Type> x x))
   967  (MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 <v.Type> x x) (LEA(Q|L)2 <v.Type> x x))
   968  (MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 <v.Type> x x))
   969  (MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 <v.Type> x x))
   970  (MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 <v.Type> x x) (LEA(Q|L)4 <v.Type> x x))
   971  (MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
   972  (MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
   973  
   974  (MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >=  15 => (SUB(Q|L)  (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
   975  (MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >=  17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
   976  (MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >=  34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
   977  (MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >=  68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
   978  (MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const <v.Type> [int8(log32(c-8))] x) x)
   979  (MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 <v.Type> x x))
   980  (MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 <v.Type> x x))
   981  (MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 <v.Type> x x))
   982  
   983  // combine add/shift into LEAQ/LEAL
   984  (ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
   985  (ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
   986  (ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y)
   987  (ADD(L|Q) x (ADD(L|Q) y y))        => (LEA(L|Q)2 x y)
   988  (ADD(L|Q) x (ADD(L|Q) x y))        => (LEA(L|Q)2 y x)
   989  
   990  // combine ADDQ/ADDQconst into LEAQ1/LEAL1
   991  (ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
   992  (ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
   993  (ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x)
   994  
   995  // fold ADDQ/ADDL into LEAQ/LEAL
   996  (ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
   997  (LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
   998  (LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
   999  (ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
  1000  
  1001  // fold ADDQconst/ADDLconst into LEAQx/LEALx
  1002  (ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
  1003  (ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
  1004  (ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
  1005  (ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
  1006  (LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
  1007  (LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
  1008  (LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
  1009  (LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
  1010  (LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
  1011  (LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d))   && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
  1012  (LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)
  1013  
  1014  // fold shifts into LEAQx/LEALx
  1015  (LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y)
  1016  (LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
  1017  (LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
  1018  (LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y)
  1019  (LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
  1020  (LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y)
  1021  
  1022  // reverse ordering of compare instruction
  1023  (SETL (InvertFlags x)) => (SETG x)
  1024  (SETG (InvertFlags x)) => (SETL x)
  1025  (SETB (InvertFlags x)) => (SETA x)
  1026  (SETA (InvertFlags x)) => (SETB x)
  1027  (SETLE (InvertFlags x)) => (SETGE x)
  1028  (SETGE (InvertFlags x)) => (SETLE x)
  1029  (SETBE (InvertFlags x)) => (SETAE x)
  1030  (SETAE (InvertFlags x)) => (SETBE x)
  1031  (SETEQ (InvertFlags x)) => (SETEQ x)
  1032  (SETNE (InvertFlags x)) => (SETNE x)
  1033  
  1034  (SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
  1035  (SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
  1036  (SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
  1037  (SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
  1038  (SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
  1039  (SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
  1040  (SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
  1041  (SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
  1042  (SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
  1043  (SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)
  1044  
  1045  // sign extended loads
  1046  // Note: The combined instruction must end up in the same block
  1047  // as the original load. If not, we end up making a value with
  1048  // memory type live in two different blocks, which can lead to
  1049  // multiple memory values alive simultaneously.
  1050  // Make sure we don't combine these ops if the load has another use.
  1051  // This prevents a single load from being split into multiple loads
  1052  // which then might return different values.  See test/atomicload.go.
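        // The @x.Block form below places the combined load in the original load's block,
        // so that the memory-typed value is not made live in two different blocks.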
  1053  (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1054  (MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1055  (MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1056  (MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
  1057  (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1058  (MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1059  (MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1060  (MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
  1061  (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  1062  (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  1063  (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
  1064  (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1065  (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1066  (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
  1067  (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  1068  (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
  1069  (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
  1070  (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
  1071  
  1072  (MOVLQZX x) && zeroUpper32Bits(x,3) => x
  1073  (MOVWQZX x) && zeroUpper48Bits(x,3) => x
  1074  (MOVBQZX x) && zeroUpper56Bits(x,3) => x
  1075  
  1076  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
  1077  (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
  1078  (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
  1079  (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
  1080  (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
  1081  (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
  1082  (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
  1083  (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
  1084  
  1085  // Fold extensions and ANDs together.
  1086  (MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
  1087  (MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
  1088  (MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
  1089  (MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
  1090  (MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
  1091  (MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
  1092  
  1093  // Don't extend before storing
  1094  (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
  1095  (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
  1096  (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
  1097  (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
  1098  (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
  1099  (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
  1100  
  1101  // fold constants into memory operations
  1102  // Note that this is not always a good idea: if not all uses of
  1103  // the ADDQconst are eliminated, we still have to compute the ADDQconst and we now
  1104  // have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
  1105  // Nevertheless, let's do it!
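        // For example (illustrative only), (MOVQload [8] {sym} (ADDQconst [16] ptr) mem)
        // becomes (MOVQload [24] {sym} ptr mem), provided the combined offset fits in 32 bits.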
  1106  (MOV(Q|L|W|B|SS|SD|O)load  [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1107      (MOV(Q|L|W|B|SS|SD|O)load  [off1+off2] {sym} ptr mem)
  1108  (MOV(Q|L|W|B|SS|SD|O)store  [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1109  	(MOV(Q|L|W|B|SS|SD|O)store  [off1+off2] {sym} ptr val mem)
  1110  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1111  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
  1112  ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1113  	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
  1114  ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1115  	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
  1116  (CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1117  	(CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
  1118  (CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
  1119  	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
  1120  
  1121  ((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1122  	((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
  1123  ((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
  1124  	((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
  1125  ((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
  1126  	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
  1127  ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
  1128  	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
  1129  ((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1130  	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
  1131  ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
  1132  	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
  1133  
  1134  // Fold constants into stores.
  1135  (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
  1136  	(MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
  1137  (MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1138  	(MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
  1139  (MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1140  	(MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
  1141  (MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
  1142  	(MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
  1143  
  1144  // Fold address offsets into constant stores.
  1145  (MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
  1146  	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
  1147  
  1148  // We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
  1149  // what variables are being read/written by the ops.
  1150  (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1151  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1152  	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
  1153  (MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1154  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1155  	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1156  (MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
  1157  	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1158  (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1159  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1160  	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1161  ((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1162  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1163  	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1164  ((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1165  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1166  	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1167  (CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1168  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1169  	(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1170  (CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1171  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1172  	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1173  
  1174  ((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1175  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1176  	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1177  ((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
  1178  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1179  	((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
  1180  ((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1181  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1182  	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1183  ((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
  1184  	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
  1185  	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
  1186  ((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1187  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1188  	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1189  ((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
  1190  	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1191  	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1192  
  1193  // fold LEAQs together
  1194  (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1195        (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
  1196  
  1197  // LEAQ into LEAQ1
  1198  (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1199         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1200  
  1201  // LEAQ1 into LEAQ
  1202  (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1203         (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1204  
  1205  // LEAQ into LEAQ[248]
  1206  (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1207         (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1208  (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1209         (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1210  (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
  1211         (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1212  
  1213  // LEAQ[248] into LEAQ
  1214  (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1215        (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1216  (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1217        (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1218  (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1219        (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
  1220  
  1221  // LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
  1222  (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1223        (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
  1224  (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1225        (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
  1226  (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
  1227        (LEAQ4 [off1+2*off2] {sym1} x y)
  1228  (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
  1229        (LEAQ8 [off1+4*off2] {sym1} x y)
  1230  // TODO: more?
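        // For example (illustrative only), (LEAQ1 [a] x (LEAQ1 [b] y y)) computes
        // x + (y + y + b) + a, which is exactly (LEAQ2 [a+b] x y).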
  1231  
  1232  // Lower LEAQ2/4/8 when the index is a constant
  1233  (LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
  1234  	(LEAQ [off+int32(scale)*2] {sym} x)
  1235  (LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
  1236  	(LEAQ [off+int32(scale)*4] {sym} x)
  1237  (LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
  1238  	(LEAQ [off+int32(scale)*8] {sym} x)
  1239  
  1240  // Absorb InvertFlags into branches.
  1241  (LT (InvertFlags cmp) yes no) => (GT cmp yes no)
  1242  (GT (InvertFlags cmp) yes no) => (LT cmp yes no)
  1243  (LE (InvertFlags cmp) yes no) => (GE cmp yes no)
  1244  (GE (InvertFlags cmp) yes no) => (LE cmp yes no)
  1245  (ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
  1246  (UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
  1247  (ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
  1248  (UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
  1249  (EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
  1250  (NE (InvertFlags cmp) yes no) => (NE cmp yes no)
  1251  
  1252  // Constant comparisons.
  1253  (CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
  1254  (CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
  1255  (CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
  1256  (CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
  1257  (CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
  1258  (CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
  1259  (CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
  1260  (CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
  1261  (CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
  1262  (CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
  1263  (CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
  1264  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
  1265  (CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
  1266  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
  1267  (CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
  1268  (CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
  1269  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
  1270  (CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
  1271  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
  1272  (CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
  1273  
  1274  // CMPQconst requires a 32-bit const, but we can still constant-fold 64-bit consts.
  1275  // In theory this applies to any of the simplifications above,
  1276  // but CMPQ is the only one I've actually seen occur.
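        // For example (illustrative only), (CMPQ (MOVQconst [1<<40]) (MOVQconst [1]))
        // folds to (FlagGT_UGT), even though 1<<40 cannot be encoded in a CMPQconst.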
  1277  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
  1278  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
  1279  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
  1280  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
  1281  (CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
  1282  
  1283  // Other known comparisons.
  1284  (CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
  1285  (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
  1286  (CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
  1287  (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
  1288  (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1289  (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1290  (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
  1291  (CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
  1292  (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m)  && int8(m)  < n => (FlagLT_ULT)
  1293  
  1294  // TESTQ c c sets flags like CMPQ c 0.
  1295  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
  1296  (TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
  1297  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0  => (FlagLT_UGT)
  1298  (TESTLconst [c] (MOVLconst [c])) && c < 0  => (FlagLT_UGT)
  1299  (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0  => (FlagGT_UGT)
  1300  (TESTLconst [c] (MOVLconst [c])) && c > 0  => (FlagGT_UGT)
  1301  
  1302  // TODO: DIVxU also.
  1303  
  1304  // Absorb flag constants into SBB ops.
  1305  (SBBQcarrymask (FlagEQ))     => (MOVQconst [0])
  1306  (SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
  1307  (SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
  1308  (SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
  1309  (SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
  1310  (SBBLcarrymask (FlagEQ))     => (MOVLconst [0])
  1311  (SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
  1312  (SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
  1313  (SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
  1314  (SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
  1315  
  1316  // Absorb flag constants into branches.
  1317  ((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no)     => (First yes no)
  1318  ((NE|LT|GT|ULT|UGT) (FlagEQ) yes no)     => (First no yes)
  1319  ((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
  1320  ((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
  1321  ((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
  1322  ((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
  1323  ((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
  1324  ((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
  1325  ((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
  1326  ((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)
  1327  
  1328  // Absorb flag constants into SETxx ops.
  1329  ((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ))     => (MOVLconst [1])
  1330  ((SETNE|SETL|SETG|SETB|SETA)     (FlagEQ))     => (MOVLconst [0])
  1331  ((SETNE|SETL|SETLE|SETB|SETBE)   (FlagLT_ULT)) => (MOVLconst [1])
  1332  ((SETEQ|SETG|SETGE|SETA|SETAE)   (FlagLT_ULT)) => (MOVLconst [0])
  1333  ((SETNE|SETL|SETLE|SETA|SETAE)   (FlagLT_UGT)) => (MOVLconst [1])
  1334  ((SETEQ|SETG|SETGE|SETB|SETBE)   (FlagLT_UGT)) => (MOVLconst [0])
  1335  ((SETNE|SETG|SETGE|SETB|SETBE)   (FlagGT_ULT)) => (MOVLconst [1])
  1336  ((SETEQ|SETL|SETLE|SETA|SETAE)   (FlagGT_ULT)) => (MOVLconst [0])
  1337  ((SETNE|SETG|SETGE|SETA|SETAE)   (FlagGT_UGT)) => (MOVLconst [1])
  1338  ((SETEQ|SETL|SETLE|SETB|SETBE)   (FlagGT_UGT)) => (MOVLconst [0])
  1339  
  1340  (SETEQstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1341  (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1342  (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1343  (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1344  (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1345  
  1346  (SETNEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1347  (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1348  (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1349  (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1350  (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1351  
  1352  (SETLstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1353  (SETLstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1354  (SETLstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1355  (SETLstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1356  (SETLstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1357  
  1358  (SETLEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1359  (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1360  (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1361  (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1362  (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1363  
  1364  (SETGstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1365  (SETGstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1366  (SETGstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1367  (SETGstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1368  (SETGstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1369  
  1370  (SETGEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1371  (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1372  (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1373  (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1374  (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1375  
  1376  (SETBstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1377  (SETBstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1378  (SETBstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1379  (SETBstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1380  (SETBstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1381  
  1382  (SETBEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1383  (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1384  (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1385  (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1386  (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1387  
  1388  (SETAstore  [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1389  (SETAstore  [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1390  (SETAstore  [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1391  (SETAstore  [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1392  (SETAstore  [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1393  
  1394  (SETAEstore [off] {sym} ptr (FlagEQ)     mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1395  (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1396  (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1397  (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
  1398  (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
  1399  
  1400  // Remove redundant *const ops
  1401  (ADDQconst [0] x)          => x
  1402  (ADDLconst [c] x) && c==0  => x
  1403  (SUBQconst [0] x)          => x
  1404  (SUBLconst [c] x) && c==0  => x
  1405  (ANDQconst [0] _)          => (MOVQconst [0])
  1406  (ANDLconst [c] _) && c==0  => (MOVLconst [0])
  1407  (ANDQconst [-1] x)         => x
  1408  (ANDLconst [c] x) && c==-1 => x
  1409  (ORQconst [0] x)           => x
  1410  (ORLconst [c] x)  && c==0  => x
  1411  (ORQconst [-1] _)          => (MOVQconst [-1])
  1412  (ORLconst [c] _)  && c==-1 => (MOVLconst [-1])
  1413  (XORQconst [0] x)          => x
  1414  (XORLconst [c] x) && c==0  => x
  1415  // TODO: since we got rid of the W/B versions, we might miss
  1416  // things like (ANDLconst [0x100] x) which were formerly
  1417  // (ANDBconst [0] x).  Probably doesn't happen very often.
  1418  // If we cared, we might do:
  1419  //  (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])
  1420  
  1421  // Remove redundant ops
  1422  // Not in generic rules, because these patterns may appear after lowering of, e.g., Slicemask
  1423  (NEG(Q|L) (NEG(Q|L) x)) => x
  1424  (NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
  1425  
  1426  // Convert constant subtracts to constant adds
  1427  (SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
  1428  (SUBLconst [c] x) => (ADDLconst [-c] x)
  1429  
  1430  // generic constant folding
  1431  // TODO: more of this
  1432  (ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
  1433  (ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
  1434  (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
  1435  (ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
  1436  (SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
  1437  (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
  1438  (SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
  1439  (SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
  1440  (SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
  1441  (SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
  1442  (NEGQ (MOVQconst [c])) => (MOVQconst [-c])
  1443  (NEGL (MOVLconst [c])) => (MOVLconst [-c])
  1444  (MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
  1445  (MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
  1446  (ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
  1447  (ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
  1448  (ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
  1449  (ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
  1450  (XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
  1451  (XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
  1452  (NOTQ (MOVQconst [c])) => (MOVQconst [^c])
  1453  (NOTL (MOVLconst [c])) => (MOVLconst [^c])
  1454  (BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
  1455  (BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))])
  1456  (BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
  1457  (BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))])
  1458  (BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
  1459  (BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))])
  1460  
  1461  // If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
  1462  // but we can still constant-fold.
  1463  // In theory this applies to any of the simplifications above,
  1464  // but ORQ is the only one I've actually seen occur.
  1465  (ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
  1466  
  1467  // generic simplifications
  1468  // TODO: more of this
  1469  (ADDQ x (NEGQ y)) => (SUBQ x y)
  1470  (ADDL x (NEGL y)) => (SUBL x y)
  1471  (SUBQ x x) => (MOVQconst [0])
  1472  (SUBL x x) => (MOVLconst [0])
  1473  (ANDQ x x) => x
  1474  (ANDL x x) => x
  1475  (ORQ x x)  => x
  1476  (ORL x x)  => x
  1477  (XORQ x x) => (MOVQconst [0])
  1478  (XORL x x) => (MOVLconst [0])
  1479  
  1480  (SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
  1481  (SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
  1482  (SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
  1483  
  1484  // Fold NEG into ADDconst/MULconst. Take care to keep c in the 32-bit range.
  1485  (NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
  1486  (MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
  1487  
  1488  // checking AND against 0.
  1489  (CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
  1490  (CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
  1491  (CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
  1492  (CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
  1493  (CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
  1494  (CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
  1495  (CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
  1496  (CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
  1497  
  1498  // Convert TESTx to TESTxconst if possible.
  1499  (TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
  1500  (TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
  1501  (TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
  1502  (TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
  1503  
  1504  // TEST %reg,%reg is shorter than CMP
  1505  (CMPQconst x [0]) => (TESTQ x x)
  1506  (CMPLconst x [0]) => (TESTL x x)
  1507  (CMPWconst x [0]) => (TESTW x x)
  1508  (CMPBconst x [0]) => (TESTB x x)
  1509  (TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
  1510  (TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
  1511  (TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
  1512  (TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
  1513  
  1514  // Convert LEAQ1 back to ADDQ if we can
  1515  (LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
  1516  
  1517  // Combining byte loads into larger (unaligned) loads.
  1518  // There are many ways these combinations could occur.  This is
  1519  // designed to match the way encoding/binary.LittleEndian does it.
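        // For reference (illustrative; not one of the rules), the shape matched below is the
        // lowered form of code like encoding/binary.LittleEndian.Uint16:
        //
        //	func Uint16(b []byte) uint16 {
        //		return uint16(b[0]) | uint16(b[1])<<8
        //	}
        //
        // which produces an OR of a byte load with a shifted byte load and is merged into a
        // single MOVWload (and similarly for the wider variants).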
  1520  
  1521  // Little-endian loads
  1522  
  1523  (OR(L|Q)                  x0:(MOVBload [i0] {s} p mem)
  1524      sh:(SHL(L|Q)const [8] x1:(MOVBload [i1] {s} p mem)))
  1525    && i1 == i0+1
  1526    && x0.Uses == 1
  1527    && x1.Uses == 1
  1528    && sh.Uses == 1
  1529    && mergePoint(b,x0,x1) != nil
  1530    && clobber(x0, x1, sh)
  1531    => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
  1532  
  1533  (OR(L|Q)                  x0:(MOVBload [i] {s} p0 mem)
  1534      sh:(SHL(L|Q)const [8] x1:(MOVBload [i] {s} p1 mem)))
  1535    && x0.Uses == 1
  1536    && x1.Uses == 1
  1537    && sh.Uses == 1
  1538    && sequentialAddresses(p0, p1, 1)
  1539    && mergePoint(b,x0,x1) != nil
  1540    && clobber(x0, x1, sh)
  1541    => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
  1542  
  1543  (OR(L|Q)                   x0:(MOVWload [i0] {s} p mem)
  1544      sh:(SHL(L|Q)const [16] x1:(MOVWload [i1] {s} p mem)))
  1545    && i1 == i0+2
  1546    && x0.Uses == 1
  1547    && x1.Uses == 1
  1548    && sh.Uses == 1
  1549    && mergePoint(b,x0,x1) != nil
  1550    && clobber(x0, x1, sh)
  1551    => @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
  1552  
  1553  (OR(L|Q)                   x0:(MOVWload [i] {s} p0 mem)
  1554      sh:(SHL(L|Q)const [16] x1:(MOVWload [i] {s} p1 mem)))
  1555    && x0.Uses == 1
  1556    && x1.Uses == 1
  1557    && sh.Uses == 1
  1558    && sequentialAddresses(p0, p1, 2)
  1559    && mergePoint(b,x0,x1) != nil
  1560    && clobber(x0, x1, sh)
  1561    => @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
  1562  
  1563  (ORQ                   x0:(MOVLload [i0] {s} p mem)
  1564      sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
  1565    && i1 == i0+4
  1566    && x0.Uses == 1
  1567    && x1.Uses == 1
  1568    && sh.Uses == 1
  1569    && mergePoint(b,x0,x1) != nil
  1570    && clobber(x0, x1, sh)
  1571    => @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
  1572  
  1573  (ORQ                   x0:(MOVLload [i] {s} p0 mem)
  1574      sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
  1575    && x0.Uses == 1
  1576    && x1.Uses == 1
  1577    && sh.Uses == 1
  1578    && sequentialAddresses(p0, p1, 4)
  1579    && mergePoint(b,x0,x1) != nil
  1580    && clobber(x0, x1, sh)
  1581    => @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
  1582  
  1583  (OR(L|Q)
  1584      s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
  1585      or:(OR(L|Q)
  1586          s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
  1587  	y))
  1588    && i1 == i0+1
  1589    && j1 == j0+8
  1590    && j0 % 16 == 0
  1591    && x0.Uses == 1
  1592    && x1.Uses == 1
  1593    && s0.Uses == 1
  1594    && s1.Uses == 1
  1595    && or.Uses == 1
  1596    && mergePoint(b,x0,x1,y) != nil
  1597    && clobber(x0, x1, s0, s1, or)
  1598    => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
  1599  
  1600  (OR(L|Q)
  1601      s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
  1602      or:(OR(L|Q)
  1603          s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
  1604  	y))
  1605    && j1 == j0+8
  1606    && j0 % 16 == 0
  1607    && x0.Uses == 1
  1608    && x1.Uses == 1
  1609    && s0.Uses == 1
  1610    && s1.Uses == 1
  1611    && or.Uses == 1
  1612    && sequentialAddresses(p0, p1, 1)
  1613    && mergePoint(b,x0,x1,y) != nil
  1614    && clobber(x0, x1, s0, s1, or)
  1615    => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
  1616  
  1617  (ORQ
  1618      s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
  1619      or:(ORQ
  1620          s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))
  1621  	y))
  1622    && i1 == i0+2
  1623    && j1 == j0+16
  1624    && j0 % 32 == 0
  1625    && x0.Uses == 1
  1626    && x1.Uses == 1
  1627    && s0.Uses == 1
  1628    && s1.Uses == 1
  1629    && or.Uses == 1
  1630    && mergePoint(b,x0,x1,y) != nil
  1631    && clobber(x0, x1, s0, s1, or)
  1632    => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
  1633  
  1634  (ORQ
  1635      s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem))
  1636      or:(ORQ
  1637          s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem))
  1638  	y))
  1639    && j1 == j0+16
  1640    && j0 % 32 == 0
  1641    && x0.Uses == 1
  1642    && x1.Uses == 1
  1643    && s0.Uses == 1
  1644    && s1.Uses == 1
  1645    && or.Uses == 1
  1646    && sequentialAddresses(p0, p1, 2)
  1647    && mergePoint(b,x0,x1,y) != nil
  1648    && clobber(x0, x1, s0, s1, or)
  1649    => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
  1650  
  1651  // Big-endian loads
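        // For reference (illustrative only), these match the lowered form of code like
        // encoding/binary.BigEndian.Uint16, i.e. uint16(b[1]) | uint16(b[0])<<8, replacing
        // the pair of byte loads with a single word load plus a byte swap (ROLW/BSWAP).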
  1652  
  1653  (OR(L|Q)
  1654                             x1:(MOVBload [i1] {s} p mem)
  1655      sh:(SHL(L|Q)const [8]  x0:(MOVBload [i0] {s} p mem)))
  1656    && i1 == i0+1
  1657    && x0.Uses == 1
  1658    && x1.Uses == 1
  1659    && sh.Uses == 1
  1660    && mergePoint(b,x0,x1) != nil
  1661    && clobber(x0, x1, sh)
  1662    => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
  1663  
  1664  (OR(L|Q)
  1665                             x1:(MOVBload [i] {s} p1 mem)
  1666      sh:(SHL(L|Q)const [8]  x0:(MOVBload [i] {s} p0 mem)))
  1667    && x0.Uses == 1
  1668    && x1.Uses == 1
  1669    && sh.Uses == 1
  1670    && sequentialAddresses(p0, p1, 1)
  1671    && mergePoint(b,x0,x1) != nil
  1672    && clobber(x0, x1, sh)
  1673    => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
  1674  
  1675  (OR(L|Q)
  1676                              r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
  1677      sh:(SHL(L|Q)const [16]  r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
  1678    && i1 == i0+2
  1679    && x0.Uses == 1
  1680    && x1.Uses == 1
  1681    && r0.Uses == 1
  1682    && r1.Uses == 1
  1683    && sh.Uses == 1
  1684    && mergePoint(b,x0,x1) != nil
  1685    && clobber(x0, x1, r0, r1, sh)
  1686    => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
  1687  
  1688  (OR(L|Q)
  1689                              r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))
  1690      sh:(SHL(L|Q)const [16]  r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
  1691    && x0.Uses == 1
  1692    && x1.Uses == 1
  1693    && r0.Uses == 1
  1694    && r1.Uses == 1
  1695    && sh.Uses == 1
  1696    && sequentialAddresses(p0, p1, 2)
  1697    && mergePoint(b,x0,x1) != nil
  1698    && clobber(x0, x1, r0, r1, sh)
  1699    => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
  1700  
  1701  (ORQ
  1702                          r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
  1703      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
  1704    && i1 == i0+4
  1705    && x0.Uses == 1
  1706    && x1.Uses == 1
  1707    && r0.Uses == 1
  1708    && r1.Uses == 1
  1709    && sh.Uses == 1
  1710    && mergePoint(b,x0,x1) != nil
  1711    && clobber(x0, x1, r0, r1, sh)
  1712    => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
  1713  
  1714  (ORQ
  1715                          r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem))
  1716      sh:(SHLQconst [32]  r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
  1717    && x0.Uses == 1
  1718    && x1.Uses == 1
  1719    && r0.Uses == 1
  1720    && r1.Uses == 1
  1721    && sh.Uses == 1
  1722    && sequentialAddresses(p0, p1, 4)
  1723    && mergePoint(b,x0,x1) != nil
  1724    && clobber(x0, x1, r0, r1, sh)
  1725    => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
  1726  
  1727  (OR(L|Q)
  1728      s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
  1729      or:(OR(L|Q)
  1730          s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
  1731  	y))
  1732    && i1 == i0+1
  1733    && j1 == j0-8
  1734    && j1 % 16 == 0
  1735    && x0.Uses == 1
  1736    && x1.Uses == 1
  1737    && s0.Uses == 1
  1738    && s1.Uses == 1
  1739    && or.Uses == 1
  1740    && mergePoint(b,x0,x1,y) != nil
  1741    && clobber(x0, x1, s0, s1, or)
  1742    => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
  1743  
  1744  (OR(L|Q)
  1745      s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
  1746      or:(OR(L|Q)
  1747          s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
  1748  	y))
  1749    && j1 == j0-8
  1750    && j1 % 16 == 0
  1751    && x0.Uses == 1
  1752    && x1.Uses == 1
  1753    && s0.Uses == 1
  1754    && s1.Uses == 1
  1755    && or.Uses == 1
  1756    && sequentialAddresses(p0, p1, 1)
  1757    && mergePoint(b,x0,x1,y) != nil
  1758    && clobber(x0, x1, s0, s1, or)
  1759    => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
  1760  
  1761  (ORQ
  1762      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
  1763      or:(ORQ
  1764          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
  1765  	y))
  1766    && i1 == i0+2
  1767    && j1 == j0-16
  1768    && j1 % 32 == 0
  1769    && x0.Uses == 1
  1770    && x1.Uses == 1
  1771    && r0.Uses == 1
  1772    && r1.Uses == 1
  1773    && s0.Uses == 1
  1774    && s1.Uses == 1
  1775    && or.Uses == 1
  1776    && mergePoint(b,x0,x1,y) != nil
  1777    && clobber(x0, x1, r0, r1, s0, s1, or)
  1778    => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
  1779  
  1780  (ORQ
  1781      s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))
  1782      or:(ORQ
  1783          s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)))
  1784  	y))
  1785    && j1 == j0-16
  1786    && j1 % 32 == 0
  1787    && x0.Uses == 1
  1788    && x1.Uses == 1
  1789    && r0.Uses == 1
  1790    && r1.Uses == 1
  1791    && s0.Uses == 1
  1792    && s1.Uses == 1
  1793    && or.Uses == 1
  1794    && sequentialAddresses(p0, p1, 2)
  1795    && mergePoint(b,x0,x1,y) != nil
  1796    && clobber(x0, x1, r0, r1, s0, s1, or)
  1797    => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
  1798  
  1799  // Combine two byte stores + shift into rolw 8 + word store
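        // For example (illustrative only), b[0] = byte(w >> 8); b[1] = byte(w)
        // (essentially encoding/binary.BigEndian.PutUint16) matches the pattern below
        // and becomes a single ROLW $8 followed by a MOVWstore.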
  1800  (MOVBstore [i] {s} p w
  1801    x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
  1802    && x0.Uses == 1
  1803    && clobber(x0)
  1804    => (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
  1805  (MOVBstore [i] {s} p1 w
  1806    x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
  1807    && x0.Uses == 1
  1808    && sequentialAddresses(p0, p1, 1)
  1809    && clobber(x0)
  1810    => (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
  1811  
  1812  // Combine stores + shifts into bswap and larger (unaligned) stores
  1813  (MOVBstore [i] {s} p w
  1814    x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)
  1815    x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)
  1816    x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
  1817    && x0.Uses == 1
  1818    && x1.Uses == 1
  1819    && x2.Uses == 1
  1820    && clobber(x0, x1, x2)
  1821    => (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
  1822  (MOVBstore [i] {s} p3 w
  1823    x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w)
  1824    x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w)
  1825    x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
  1826    && x0.Uses == 1
  1827    && x1.Uses == 1
  1828    && x2.Uses == 1
  1829    && sequentialAddresses(p0, p1, 1)
  1830    && sequentialAddresses(p1, p2, 1)
  1831    && sequentialAddresses(p2, p3, 1)
  1832    && clobber(x0, x1, x2)
  1833    => (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
  1834  
  1835  (MOVBstore [i] {s} p w
  1836    x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
  1837    x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)
  1838    x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)
  1839    x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)
  1840    x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)
  1841    x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)
  1842    x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
  1843    && x0.Uses == 1
  1844    && x1.Uses == 1
  1845    && x2.Uses == 1
  1846    && x3.Uses == 1
  1847    && x4.Uses == 1
  1848    && x5.Uses == 1
  1849    && x6.Uses == 1
  1850    && clobber(x0, x1, x2, x3, x4, x5, x6)
  1851    => (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
  1852  (MOVBstore [i] {s} p7 w
  1853    x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w)
  1854    x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w)
  1855    x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w)
  1856    x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w)
  1857    x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w)
  1858    x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w)
  1859    x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
  1860    && x0.Uses == 1
  1861    && x1.Uses == 1
  1862    && x2.Uses == 1
  1863    && x3.Uses == 1
  1864    && x4.Uses == 1
  1865    && x5.Uses == 1
  1866    && x6.Uses == 1
  1867    && sequentialAddresses(p0, p1, 1)
  1868    && sequentialAddresses(p1, p2, 1)
  1869    && sequentialAddresses(p2, p3, 1)
  1870    && sequentialAddresses(p3, p4, 1)
  1871    && sequentialAddresses(p4, p5, 1)
  1872    && sequentialAddresses(p5, p6, 1)
  1873    && sequentialAddresses(p6, p7, 1)
  1874    && clobber(x0, x1, x2, x3, x4, x5, x6)
  1875    => (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
  1876  
  1877  // Combine constant stores into larger (unaligned) stores.
  1878  (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
  1879    && x.Uses == 1
  1880    && a.Off() + 1 == c.Off()
  1881    && clobber(x)
  1882    => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
  1883  (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
  1884    && x.Uses == 1
  1885    && a.Off() + 1 == c.Off()
  1886    && clobber(x)
  1887    => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
  1888  (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
  1889    && x.Uses == 1
  1890    && a.Off() + 2 == c.Off()
  1891    && clobber(x)
  1892    => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
  1893  (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
  1894    && x.Uses == 1
  1895    && a.Off() + 2 == c.Off()
  1896    && clobber(x)
  1897    => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
  1898  (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
  1899    && x.Uses == 1
  1900    && a.Off() + 4 == c.Off()
  1901    && clobber(x)
  1902    => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
  1903  (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
  1904    && x.Uses == 1
  1905    && a.Off() + 4 == c.Off()
  1906    && clobber(x)
  1907    => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
  1908  (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [a] {s} p mem))
  1909    && config.useSSE
  1910    && x.Uses == 1
  1911    && a.Off() + 8 == c.Off()
  1912    && a.Val() == 0
  1913    && c.Val() == 0
  1914    && clobber(x)
  1915    => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem)
  1916  (MOVQstoreconst [a] {s} p x:(MOVQstoreconst [c] {s} p mem))
  1917    && config.useSSE
  1918    && x.Uses == 1
  1919    && a.Off() + 8 == c.Off()
  1920    && a.Val() == 0
  1921    && c.Val() == 0
  1922    && clobber(x)
  1923    => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem)
  1924  
  1925  // Combine stores into larger (unaligned) stores. Little endian.
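        // For example (illustrative only), b[0] = byte(w); b[1] = byte(w >> 8)
        // (essentially encoding/binary.LittleEndian.PutUint16) is matched below and merged
        // into a single MOVWstore of w.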
  1926  (MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
  1927    && x.Uses == 1
  1928    && clobber(x)
  1929    => (MOVWstore [i-1] {s} p w mem)
  1930  (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem))
  1931    && x.Uses == 1
  1932    && clobber(x)
  1933    => (MOVWstore [i] {s} p w mem)
  1934  (MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem))
  1935    && x.Uses == 1
  1936    && clobber(x)
  1937    => (MOVWstore [i-1] {s} p w0 mem)
  1938  (MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
  1939    && x.Uses == 1
  1940    && sequentialAddresses(p0, p1, 1)
  1941    && clobber(x)
  1942    => (MOVWstore [i] {s} p0 w mem)
  1943  (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) mem))
  1944    && x.Uses == 1
  1945    && sequentialAddresses(p0, p1, 1)
  1946    && clobber(x)
  1947    => (MOVWstore [i] {s} p0 w mem)
  1948  (MOVBstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVBstore [i] {s} p0 w0:(SHR(L|Q)const [j-8] w) mem))
  1949    && x.Uses == 1
  1950    && sequentialAddresses(p0, p1, 1)
  1951    && clobber(x)
  1952    => (MOVWstore [i] {s} p0 w0 mem)
  1953  
  1954  (MOVWstore [i] {s} p (SHR(L|Q)const [16] w) x:(MOVWstore [i-2] {s} p w mem))
  1955    && x.Uses == 1
  1956    && clobber(x)
  1957    => (MOVLstore [i-2] {s} p w mem)
  1958  (MOVWstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVWstore [i-2] {s} p w0:(SHR(L|Q)const [j-16] w) mem))
  1959    && x.Uses == 1
  1960    && clobber(x)
  1961    => (MOVLstore [i-2] {s} p w0 mem)
  1962  (MOVWstore [i] {s} p1 (SHR(L|Q)const [16] w) x:(MOVWstore [i] {s} p0 w mem))
  1963    && x.Uses == 1
  1964    && sequentialAddresses(p0, p1, 2)
  1965    && clobber(x)
  1966    => (MOVLstore [i] {s} p0 w mem)
  1967  (MOVWstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVWstore [i] {s} p0 w0:(SHR(L|Q)const [j-16] w) mem))
  1968    && x.Uses == 1
  1969    && sequentialAddresses(p0, p1, 2)
  1970    && clobber(x)
  1971    => (MOVLstore [i] {s} p0 w0 mem)
  1972  
  1973  (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
  1974    && x.Uses == 1
  1975    && clobber(x)
  1976    => (MOVQstore [i-4] {s} p w mem)
  1977  (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
  1978    && x.Uses == 1
  1979    && clobber(x)
  1980    => (MOVQstore [i-4] {s} p w0 mem)
  1981  (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
  1982    && x.Uses == 1
  1983    && sequentialAddresses(p0, p1, 4)
  1984    && clobber(x)
  1985    => (MOVQstore [i] {s} p0 w mem)
  1986  (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
  1987    && x.Uses == 1
  1988    && sequentialAddresses(p0, p1, 4)
  1989    && clobber(x)
  1990    => (MOVQstore [i] {s} p0 w0 mem)
  1991  
  1992  (MOVBstore [7] {s} p1 (SHRQconst [56] w)
  1993    x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w)
  1994    x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w)
  1995    x3:(MOVBstore [0] {s} p1 w mem))))
  1996    && x1.Uses == 1
  1997    && x2.Uses == 1
  1998    && x3.Uses == 1
  1999    && clobber(x1, x2, x3)
  2000    => (MOVQstore {s} p1 w mem)
  2001  
  2002  (MOVBstore [i] {s} p
  2003    x1:(MOVBload [j] {s2} p2 mem)
  2004      mem2:(MOVBstore [i-1] {s} p
  2005        x2:(MOVBload [j-1] {s2} p2 mem) mem))
  2006    && x1.Uses == 1
  2007    && x2.Uses == 1
  2008    && mem2.Uses == 1
  2009    && clobber(x1, x2, mem2)
  2010    => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
  2011  
  2012  (MOVWstore [i] {s} p
  2013    x1:(MOVWload [j] {s2} p2 mem)
  2014      mem2:(MOVWstore [i-2] {s} p
  2015        x2:(MOVWload [j-2] {s2} p2 mem) mem))
  2016    && x1.Uses == 1
  2017    && x2.Uses == 1
  2018    && mem2.Uses == 1
  2019    && clobber(x1, x2, mem2)
  2020    => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
  2021  
  2022  (MOVLstore [i] {s} p
  2023    x1:(MOVLload [j] {s2} p2 mem)
  2024      mem2:(MOVLstore [i-4] {s} p
  2025        x2:(MOVLload [j-4] {s2} p2 mem) mem))
  2026    && x1.Uses == 1
  2027    && x2.Uses == 1
  2028    && mem2.Uses == 1
  2029    && clobber(x1, x2, mem2)
  2030    => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
  2031  
  2032  // Merge load and op
  2033  // TODO: add indexed variants?
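        // Sketch of the source patterns these match (hypothetical Go, for
        // orientation only): an expression like x + *p lets the load fold into
        // the arithmetic op (ADDQload below), and a read-modify-write like
        // *p += x additionally folds the store, yielding ADDQmodify.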
  2034  ((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
  2035  ((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
  2036  ((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
  2037  ((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
  2038  (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
  2039  (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
  2040  	((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
  2041  (MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
  2042  (MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
  2043  	((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
  2044  
  2045  // Merge ADDQconst and LEAQ into atomic loads.
  2046  (MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  2047  	(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
  2048  (MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  2049  	(MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
  2050  
  2051  // Merge ADDQconst and LEAQ into atomic stores.
  2052  (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  2053  	(XCHGQ [off1+off2] {sym} val ptr mem)
  2054  (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
  2055  	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2056  (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  2057  	(XCHGL [off1+off2] {sym} val ptr mem)
  2058  (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
  2059  	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
  2060  
  2061  // Merge ADDQconst into atomic adds.
  2062  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  2063  (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  2064  	(XADDQlock [off1+off2] {sym} val ptr mem)
  2065  (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
  2066  	(XADDLlock [off1+off2] {sym} val ptr mem)
  2067  
  2068  // Merge ADDQconst into atomic compare and swaps.
  2069  // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
  2070  (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
  2071  	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
  2072  (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
  2073  	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
  2074  
  2075  // We don't need the conditional move if we know the arg of BSF is not zero.
  2076  (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x
  2077  // Extension is unnecessary for trailing zeros.
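        // Rationale: the ORQconst forces bit 8 (resp. 16) to be set, so the
        // lowest set bit BSFQ finds is always within the low 9 (resp. 17) bits,
        // and a zero-extension of x cannot change that result.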
  2078  (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
  2079  (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
  2080  
  2081  // Redundant sign/zero extensions
  2082  // Note: see issue 21963. We have to make sure we use the right type on
  2083  // the resulting extension (the outer type, not the inner type).
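        // The rewritten op keeps the type of the value being replaced (the
        // outer extension), which is what the note above requires.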
  2084  (MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
  2085  (MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
  2086  (MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
  2087  (MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
  2088  (MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
  2089  (MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
  2090  (MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
  2091  (MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
  2092  (MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
  2093  (MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
  2094  (MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
  2095  (MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
  2096  
  2097  (MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
  2098  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
  2099  	((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  2100  (MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
  2101  	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
  2102  	((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
  2103  
  2104  // float <-> int register moves, with no conversion.
  2105  // These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
  2106  (MOVQload  [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
  2107  (MOVLload  [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
  2108  (MOVSDload [off] {sym} ptr (MOVQstore  [off] {sym} ptr val _)) => (MOVQi2f val)
  2109  (MOVSSload [off] {sym} ptr (MOVLstore  [off] {sym} ptr val _)) => (MOVLi2f val)
  2110  
  2111  // Other load-like ops.
  2112  (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
  2113  (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
  2114  (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
  2115  (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
  2116  (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
  2117  (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
  2118  ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
  2119  ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
  2120  (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
  2121  (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
  2122  
  2123  (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
  2124  (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
  2125  (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
  2126  (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
  2127  (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
  2128  (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
  2129  
  2130  // Redirect stores to use the other register set.
  2131  (MOVQstore  [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
  2132  (MOVLstore  [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
  2133  (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore  [off] {sym} ptr val mem)
  2134  (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore  [off] {sym} ptr val mem)
  2135  
  2136  // Load args directly into the register class where they will be used.
  2137  // We do this by just modifying the type of the Arg.
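        // The @b.Func.Entry target places the rewritten Arg in the function's
        // entry block, where argument values live.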
  2138  (MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  2139  (MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  2140  (MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  2141  (MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
  2142  
  2143  // LEAQ is rematerializeable, so this helps to avoid register spill.
  2144  // See issue 22947 for details
  2145  (ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
  2146  
  2147  // HMULx is commutative, but its first argument must go in AX.
  2148  // If possible, put a rematerializeable value in the first argument slot,
  2149  // to reduce the odds that another value will have to be spilled
  2150  // specifically to free up AX.
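        // For example, (HMULQ x (MOVQconst [c])) becomes (HMULQ (MOVQconst [c]) x):
        // the constant can be rematerialized directly into AX, so no other
        // value needs to be spilled to free it.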
  2151  (HMUL(Q|L)  x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)  y x)
  2152  (HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
  2153  
  2154  // Fold loads into compares
  2155  // Note: these may be undone by the flagalloc pass.
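        // Illustrative trigger (hypothetical Go): a comparison against a value
        // loaded from memory, e.g. if x == *p, folds the load into CMPQload.
        // When the load is the second operand, the folded compare's operands
        // end up swapped, so the flags result is wrapped in InvertFlags.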
  2156  (CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
  2157  (CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
  2158  
  2159  (CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
  2160  	&& l.Uses == 1
  2161  	&& clobber(l) =>
  2162  @l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
  2163  (CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
  2164  	&& l.Uses == 1
  2165  	&& clobber(l) =>
  2166  @l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
  2167  
  2168  (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
  2169  (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
  2170  (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
  2171  (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
  2172  
  2173  (TEST(Q|L|W|B)  l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
  2174          && l == l2
  2175  	&& l.Uses == 2
  2176  	&& clobber(l) =>
  2177    @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)
  2178  
  2179  // Convert ANDload to MOVload when we can do the AND in a containing TEST op.
  2180  // Only do when it's within the same block, so we don't have flags live across basic block boundaries.
  2181  // See issue 44228.
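        // Rationale: TEST performs an AND to set the flags anyway, so when the
        // ANDload's only other use is this TEST, reloading the value and letting
        // TEST do the AND makes the separate ANDload result unnecessary.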
  2182  (TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x)
  2183  
  2184  (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
  2185  (MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  2186  (MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  2187  (MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
  2188  (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
  2189    (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
  2190      (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
  2191  
  2192  // Arch-specific inlining for small or disjoint runtime.memmove
  2193  // Match post-lowering calls, memory version.
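        // Illustrative trigger (hypothetical Go): copy(dst, src) of a small,
        // constant length reaches here as a runtime.memmove call; when
        // isInlinableMemmove says the copy is safe to expand, the call is
        // replaced by a plain Move of that size.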
  2194  (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
  2195  	&& sc.Val64() >= 0
  2196  	&& isSameCall(sym, "runtime.memmove")
  2197  	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  2198  	&& isInlinableMemmove(dst, src, sc.Val64(), config)
  2199  	&& clobber(s1, s2, s3, call)
  2200  	=> (Move [sc.Val64()] dst src mem)
  2201  
  2202  // Match post-lowering calls, register version.
  2203  (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
  2204  	&& sz >= 0
  2205  	&& isSameCall(sym, "runtime.memmove")
  2206  	&& call.Uses == 1
  2207  	&& isInlinableMemmove(dst, src, sz, config)
  2208  	&& clobber(call)
  2209  	=> (Move [sz] dst src mem)
  2210  
  2211  // Prefetch instructions
  2212  (PrefetchCache ...)   => (PrefetchT0 ...)
  2213  (PrefetchCacheStreamed ...) => (PrefetchNTA ...)
  2214  
  2215  // CPUID feature: BMI1.
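        // Rough source-level equivalents of the patterns below (illustrative):
        //	x &^ y    -> ANDN   (and-not)
        //	x & -x    -> BLSI   (isolate lowest set bit)
        //	x ^ (x-1) -> BLSMSK (mask up to and including lowest set bit)
        //	x & (x-1) -> BLSR   (clear lowest set bit)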
  2216  (AND(Q|L) x (NOT(Q|L) y))           && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y)
  2217  (AND(Q|L) x (NEG(Q|L) x))           && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x)
  2218  (XOR(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x)
  2219  (AND(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSR(Q|L) x)
  2220  
  2221  (BSWAP(Q|L) (BSWAP(Q|L) p)) => p
  2222  
  2223  // CPUID feature: MOVBE.
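        // MOVBE loads or stores with a byte swap, so a BSWAP feeding a store,
        // or a load feeding a BSWAP, folds into a single instruction (e.g.
        // patterns arising from big-endian byte assembly).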
  2224  (MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
  2225  (BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem))    && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)load [i] {s} p mem)
  2226  (BSWAP(Q|L) (MOVBE(Q|L)load [i] {s} p m))    => (MOV(Q|L)load [i] {s} p m)
  2227  (MOVBE(Q|L)store [i] {s} p (BSWAP(Q|L) x) m) => (MOV(Q|L)store [i] {s} p x m)
  2228  
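        // Combine two adjacent byte-swapped 32-bit loads into one byte-swapped
        // 64-bit load: the load at the lower address supplies the high half of
        // the big-endian result, so it is the one shifted left by 32.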
  2229  (ORQ                   x0:(MOVBELload [i0] {s} p mem)
  2230      sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem)))
  2231    && i0 == i1+4
  2232    && x0.Uses == 1
  2233    && x1.Uses == 1
  2234    && sh.Uses == 1
  2235    && mergePoint(b,x0,x1) != nil
  2236    && clobber(x0, x1, sh)
  2237    => @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem)
  2238  
  2239  (ORQ                   x0:(MOVBELload [i] {s} p0 mem)
  2240      sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem)))
  2241    && x0.Uses == 1
  2242    && x1.Uses == 1
  2243    && sh.Uses == 1
  2244    && sequentialAddresses(p1, p0, 4)
  2245    && mergePoint(b,x0,x1) != nil
  2246    && clobber(x0, x1, sh)
  2247    => @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
  2248  
