// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add64F ...) => (FADD ...)
(Add32F ...) => (FADDS ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub32F ...) => (FSUBS ...)
(Sub64F ...) => (FSUB ...)

// Combine a 64-bit integer multiply and add
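// For example, a sketch (function name hypothetical; MADDLD is only
// generated when GOPPC64 >= power9):
//
//	func muladd(x, y, z int64) int64 {
//		return x*y + z // fused into a single MADDLD
//	}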
(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)

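// POWER9 (ISA 3.0) has hardware modulo instructions (MODSD, MODUD, MODSW,
// MODUW); on earlier targets the remainder is computed as x - (x/y)*y.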
(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) && buildcfg.GOPPC64 >= 9 => (MODSD x y)
(Mod64 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))

// (x + y) / 2 with x>=y => (x - y) / 2 + y
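// Derivation: x+y can overflow 64 bits, but with x >= y the difference
// x-y cannot, and x+y and x-y have the same parity, so
//	(x + y) / 2 == (x - y) / 2 + y
// holds exactly in unsigned arithmetic.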
(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)

(Add64carry ...) => (LoweredAdd64Carry ...)
(Mul64 ...) => (MULLD ...)
(Mul(32|16|8) ...) => (MULLW ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)

(Div64 [false] x y) => (DIVD x y)
(Div64u ...) => (DIVDU ...)
(Div32 [false] x y) => (DIVW x y)
(Div32u ...) => (DIVWU ...)
(Div16 [false]  x y) => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW  (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)

(Mul32F ...) => (FMULS ...)
(Mul64F ...) => (FMUL ...)

(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIV ...)

// Lowering float <=> int
(Cvt32to32F x) => (FCFIDS (MTVSRD (SignExt32to64 x)))
(Cvt32to64F x) => (FCFID (MTVSRD (SignExt32to64 x)))
(Cvt64to32F x) => (FCFIDS (MTVSRD x))
(Cvt64to64F x) => (FCFID (MTVSRD x))

(Cvt32Fto32 x) => (MFVSRD (FCTIWZ x))
(Cvt32Fto64 x) => (MFVSRD (FCTIDZ x))
(Cvt64Fto32 x) => (MFVSRD (FCTIWZ x))
(Cvt64Fto64 x) => (MFVSRD (FCTIDZ x))

(Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F ...) => (FRSP ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

(Sqrt ...) => (FSQRT ...)
(Sqrt32 ...) => (FSQRTS ...)
(Floor ...) => (FFLOOR ...)
(Ceil ...) => (FCEIL ...)
(Trunc ...) => (FTRUNC ...)
(Round ...) => (FROUND ...)
(Copysign x y) => (FCPSGN y x)
(Abs ...) => (FABS ...)
(FMA ...) => (FMADD ...)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) ...) => (MOVBreg ...)
(SignExt16to(32|64) ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
(ZeroExt32to64 ...) => (MOVWZreg ...)

(Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
(Trunc(16|32|64)to8  x) => (MOVBZreg x)
(Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
(Trunc(32|64)to16 x) => (MOVHZreg x)
(Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
(Trunc64to32 x) => (MOVWZreg x)

// Lowering constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F ...) => (FMOV(S|D)const ...)
(ConstNil) => (MOVDconst [0])
(ConstBool [t]) => (MOVDconst [b2i(t)])

// Constant folding
(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])

// Rotates
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 x (MOVDconst [c])) => (ROTLWconst [c&31] x)
(RotateLeft64 x (MOVDconst [c])) => (ROTLconst [c&63] x)
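// e.g. bits.RotateLeft32(x, 5) lowers to a single ROTLWconst [5]; 8- and
// 16-bit rotates have no machine instruction, so they expand into a pair
// of shifts ORed together.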

// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)

// Rotate generation with non-const shift
// These match patterns from math/bits.RotateLeft(32|64), but there could be others.
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)

(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
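// e.g. bits.RotateLeft32(x, int(y)) is expanded by the generic lowering
// into the SLW/SRW/SUB form above; these rules fuse it back into ROTLW.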

// Lowering rotates
(RotateLeft32 x y) => (ROTLW x y)
(RotateLeft64 x y) => (ROTL x y)

// Constant rotate generation
(ROTLW  x (MOVDconst [c])) => (ROTLWconst  x [c&31])
(ROTL   x (MOVDconst [c])) => (ROTLconst   x [c&63])

// Combine rotate and mask operations
(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)

// Note, any rotated word bitmask is still a valid word bitmask.
(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)

(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)

(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)

// Merge shift right + shift left and clear left (e.g. for a table lookup)
(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
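// For example, in b[(x>>14)&0xFF] the index is extracted with a shift and
// mask and then scaled by the element size; the shift/mask/clear pairs
// above collapse into a single RLWINM where the encoding allows it.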

// large constant shifts
(Lsh64x64  _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
(Lsh32x64  _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
(Lsh16x64  _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
(Lsh8x64   _ (MOVDconst [c])) && uint64(c) >= 8  => (MOVDconst [0])
(Rsh8Ux64  _ (MOVDconst [c])) && uint64(c) >= 8  => (MOVDconst [0])

// large constant signed right shift: only the sign bit remains
(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64  x (MOVDconst [c])) && uint64(c) >= 8  => (SRAWconst (SignExt8to32  x) [63])

// constant shifts
(Lsh64x64  x (MOVDconst [c])) && uint64(c) < 64 => (SLDconst x [c])
(Rsh64x64  x (MOVDconst [c])) && uint64(c) < 64 => (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 => (SRDconst x [c])
(Lsh32x64  x (MOVDconst [c])) && uint64(c) < 32 => (SLWconst x [c])
(Rsh32x64  x (MOVDconst [c])) && uint64(c) < 32 => (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 => (SRWconst x [c])
(Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
(Rsh16x64  x (MOVDconst [c])) && uint64(c) < 16 => (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 => (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64   x (MOVDconst [c])) && uint64(c) < 8  => (SLWconst x [c])
(Rsh8x64   x (MOVDconst [c])) && uint64(c) < 8  => (SRAWconst (SignExt8to32  x) [c])
(Rsh8Ux64  x (MOVDconst [c])) && uint64(c) < 8  => (SRWconst (ZeroExt8to32  x) [c])

(Lsh64x32  x (MOVDconst [c])) && uint32(c) < 64 => (SLDconst x [c&63])
(Rsh64x32  x (MOVDconst [c])) && uint32(c) < 64 => (SRADconst x [c&63])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 => (SRDconst x [c&63])
(Lsh32x32  x (MOVDconst [c])) && uint32(c) < 32 => (SLWconst x [c&31])
(Rsh32x32  x (MOVDconst [c])) && uint32(c) < 32 => (SRAWconst x [c&31])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 => (SRWconst x [c&31])
(Lsh16x32  x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c&31])
(Rsh16x32  x (MOVDconst [c])) && uint32(c) < 16 => (SRAWconst (SignExt16to32 x) [c&15])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 => (SRWconst (ZeroExt16to32 x) [c&15])
(Lsh8x32   x (MOVDconst [c])) && uint32(c) < 8  => (SLWconst x [c&7])
(Rsh8x32   x (MOVDconst [c])) && uint32(c) < 8  => (SRAWconst (SignExt8to32  x) [c&7])
(Rsh8Ux32  x (MOVDconst [c])) && uint32(c) < 8  => (SRWconst (ZeroExt8to32  x) [c&7])

// Lower bounded shifts first. No need to check shift value.
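// shiftIsBounded(v) means the compiler has proven the count is less than
// the operand width, so the clamping ISEL sequences below are unnecessary.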
(Lsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
(Lsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
(Lsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
(Lsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
(Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD x y)
(Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW x y)
(Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
(Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)

// Non-constant rotates
// These are subexpressions found in statements that can become rotates.
// In these cases the shift count is known to be < 64, so the more
// complicated expressions with mask & carry are not needed.
(Lsh64x64 x (AND y (MOVDconst [63]))) => (SLD x (ANDconst <typ.Int64> [63] y))
(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) => (SLD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRD x (SUBFCconst <typ.UInt> [64]  (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUBFCconst <typ.UInt> [64]  (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))

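// General case: the ISEL picks y when y < width and -1 otherwise; a count
// of -1 (its low 7 bits are all ones) makes SLD/SRD produce 0 and SRAD
// produce all sign bits, matching Go's oversized-shift semantics.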
(Lsh64x64 x y)  => (SLD  x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Rsh64Ux64 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))

(Lsh32x64 x (AND y (MOVDconst [31]))) => (SLW x (ANDconst <typ.Int32> [31] y))
(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) => (SLW x (ANDconst <typ.Int32> [31] y))

(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x y)  => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
(Rsh32Ux64 x y) => (SRW  x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
(Lsh32x64 x y)  => (SLW  x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))

(Rsh16x64 x y)  => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
(Rsh16Ux64 x y) => (SRW  (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
(Lsh16x64 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))

(Rsh8x64 x y)  => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
(Rsh8Ux64 x y) => (SRW  (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
(Lsh8x64 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))

(Rsh64x32 x y)  => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Rsh64Ux32 x y) => (SRD x  (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Lsh64x32 x y)  => (SLD x  (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
(Rsh32x32 x y)  => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
(Rsh32Ux32 x y) => (SRW x  (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
(Lsh32x32 x y)  => (SLW x  (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))

(Rsh16x32 x y)  => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
(Rsh16Ux32 x y) => (SRW  (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
(Lsh16x32 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))

(Rsh8x32 x y)  => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
(Rsh8Ux32 x y) => (SRW  (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
(Lsh8x32 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))

(Rsh64x16 x y)  => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
(Rsh64Ux16 x y) => (SRD x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
(Lsh64x16 x y)  => (SLD x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))

(Rsh32x16 x y)  => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
(Rsh32Ux16 x y) => (SRW x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
(Lsh32x16 x y)  => (SLW x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))

(Rsh16x16 x y)  => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
(Rsh16Ux16 x y) => (SRW  (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
(Lsh16x16 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))

(Rsh8x16 x y)  => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
(Rsh8Ux16 x y) => (SRW  (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
(Lsh8x16 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))

(Rsh64x8 x y)  => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
(Rsh64Ux8 x y) => (SRD x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
(Lsh64x8 x y)  => (SLD x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))

(Rsh32x8 x y)  => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
(Rsh32Ux8 x y) => (SRW x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
(Lsh32x8 x y)  => (SLW x  (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))

(Rsh16x8 x y)  => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
(Rsh16Ux8 x y) => (SRW  (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
(Lsh16x8 x y)  => (SLW  x                 (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))

(Rsh8x8 x y)  => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
(Rsh8Ux8 x y) => (SRW  (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
(Lsh8x8 x y)  => (SLW  x                (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))

// Cleaning up shift ops
(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y)
(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
(ORN x (MOVDconst [-1])) => x

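// The AuxInt expression keeps the shift immediate legal: c&63 | (c>>6&1*63)
// maps any count with bit 6 set to 63 (and the 32-bit form likewise maps
// counts with bit 5 set to 31) instead of letting the count wrap.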
(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)

(Addr {sym} base) => (MOVDaddr {sym} [0] base)
(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)

// TODO: optimize these cases?
(Ctz32NonZero ...) => (Ctz32 ...)
(Ctz64NonZero ...) => (Ctz64 ...)

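// Pre-POWER9 trick: (x-1) &^ x sets exactly the trailing-zero bits of x,
// so a popcount of that mask equals the trailing zero count (64 for x == 0).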
(Ctz64 x) && buildcfg.GOPPC64 <= 8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz64 x) => (CNTTZD x)
(Ctz32 x) && buildcfg.GOPPC64 <= 8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
(Ctz32 x) => (CNTTZW (MOVWZreg x))
(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x)  => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))

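// bits.Len is the width minus the leading zero count; SUBFCconst computes
// the subtract-from-constant in a single instruction.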
(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))

(PopCount64 ...) => (POPCNTD ...)
(PopCount32 x) => (POPCNTW (MOVWZreg x))
(PopCount16 x) => (POPCNTW (MOVHZreg x))
(PopCount8 x) => (POPCNTB (MOVBZreg x))

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

(Neg(64|32|16|8) ...) => (NEG ...)
(Neg64F ...) => (FNEG ...)
(Neg32F ...) => (FNEG ...)

(Com(64|32|16|8) x) => (NOR x x)

// Lowering boolean ops
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(Not x) => (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (NOR y y)) => (ANDN x y)

// Lowering comparisons
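// EQV is bitwise exclusive-NOR, so bit 0 of (EQV x y) is 1 exactly when
// the two boolean operands are equal; ANDconst [1] extracts that bit.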
(EqB x y)  => (ANDconst [1] (EQV x y))
// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) => (Equal (CMPW x y))
(Eq64 x y) => (Equal (CMP x y))
(Eq32F x y) => (Equal (FCMPU x y))
(Eq64F x y) => (Equal (FCMPU x y))
(EqPtr x y) => (Equal (CMP x y))

(NeqB ...) => (XOR ...)
// Like Eq8 and Eq16, prefer sign extension likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y)  => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) => (NotEqual (CMPW x y))
(Neq64 x y) => (NotEqual (CMP x y))
(Neq32F x y) => (NotEqual (FCMPU x y))
(Neq64F x y) => (NotEqual (FCMPU x y))
(NeqPtr x y) => (NotEqual (CMP x y))

(Less8 x y)  => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) => (LessThan (CMPW x y))
(Less64 x y) => (LessThan (CMP x y))
(Less32F x y) => (FLessThan (FCMPU x y))
(Less64F x y) => (FLessThan (FCMPU x y))

(Less8U x y)  => (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) => (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) => (LessThan (CMPWU x y))
(Less64U x y) => (LessThan (CMPU x y))

(Leq8 x y)  => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) => (LessEqual (CMPW x y))
(Leq64 x y) => (LessEqual (CMP x y))
(Leq32F x y) => (FLessEqual (FCMPU x y))
(Leq64F x y) => (FLessEqual (FCMPU x y))

(Leq8U x y)  => (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) => (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqual (CMPWU x y))
(Leq64U x y) => (LessEqual (CMPU x y))

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) => (EQ cc yes no)
(If (NotEqual cc) yes no) => (NE cc yes no)
(If (LessThan cc) yes no) => (LT cc yes no)
(If (LessEqual cc) yes no) => (LE cc yes no)
(If (GreaterThan cc) yes no) => (GT cc yes no)
(If (GreaterEqual cc) yes no) => (GE cc yes no)
(If (FLessThan cc) yes no) => (FLT cc yes no)
(If (FLessEqual cc) yes no) => (FLE cc yes no)
(If (FGreaterThan cc) yes no) => (FGT cc yes no)
(If (FGreaterEqual cc) yes no) => (FGE cc yes no)

(If cond yes no) => (NE (CMPWconst [0] cond) yes no)

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) => (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) => (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) => (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) => (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) => (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) => (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) => (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) => (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) => (FGE cc yes no)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)

// absorb flag constants into branches
(EQ (FlagEQ) yes no) => (First yes no)
(EQ (FlagLT) yes no) => (First no yes)
(EQ (FlagGT) yes no) => (First no yes)

(NE (FlagEQ) yes no) => (First no yes)
(NE (FlagLT) yes no) => (First yes no)
(NE (FlagGT) yes no) => (First yes no)

(LT (FlagEQ) yes no) => (First no yes)
(LT (FlagLT) yes no) => (First yes no)
(LT (FlagGT) yes no) => (First no yes)

(LE (FlagEQ) yes no) => (First yes no)
(LE (FlagLT) yes no) => (First yes no)
(LE (FlagGT) yes no) => (First no yes)

(GT (FlagEQ) yes no) => (First no yes)
(GT (FlagLT) yes no) => (First no yes)
(GT (FlagGT) yes no) => (First yes no)

(GE (FlagEQ) yes no) => (First yes no)
(GE (FlagLT) yes no) => (First no yes)
(GE (FlagGT) yes no) => (First yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) => (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  => (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  => (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && x<y  => (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && x>y  => (FlagGT)

(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  => (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && x==y  => (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) => (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) => (MOVDconst [1])
(Equal (FlagLT)) => (MOVDconst [0])
(Equal (FlagGT)) => (MOVDconst [0])

(NotEqual (FlagEQ)) => (MOVDconst [0])
(NotEqual (FlagLT)) => (MOVDconst [1])
(NotEqual (FlagGT)) => (MOVDconst [1])

(LessThan (FlagEQ)) => (MOVDconst [0])
(LessThan (FlagLT)) => (MOVDconst [1])
(LessThan (FlagGT)) => (MOVDconst [0])

(LessEqual (FlagEQ)) => (MOVDconst [1])
(LessEqual (FlagLT)) => (MOVDconst [1])
(LessEqual (FlagGT)) => (MOVDconst [0])

(GreaterThan (FlagEQ)) => (MOVDconst [0])
(GreaterThan (FlagLT)) => (MOVDconst [0])
(GreaterThan (FlagGT)) => (MOVDconst [1])

(GreaterEqual (FlagEQ)) => (MOVDconst [1])
(GreaterEqual (FlagLT)) => (MOVDconst [0])
(GreaterEqual (FlagGT)) => (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) => (Equal x)
(NotEqual (InvertFlags x)) => (NotEqual x)
(LessThan (InvertFlags x)) => (GreaterThan x)
(GreaterThan (InvertFlags x)) => (LessThan x)
(LessEqual (InvertFlags x)) => (GreaterEqual x)
(GreaterEqual (InvertFlags x)) => (LessEqual x)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)

// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPWconst [0] bool))
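// ISEL [6] encodes the NE condition: it selects x when bool != 0, else y.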
// Fold any CR -> GPR -> CR transfers when applying the above rule.
(ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp))) => (ISEL [c] x y cmp)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) => (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && is32BitFloat(val.Type) => (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) => x -- type is wrong
(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && is32BitInt(val.Type) => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)

// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Zero [0] _ mem) => mem
(Zero [1] destptr mem) => (MOVBstorezero destptr mem)
(Zero [2] destptr mem) =>
	(MOVHstorezero destptr mem)
(Zero [3] destptr mem) =>
	(MOVBstorezero [2] destptr
		(MOVHstorezero destptr mem))
(Zero [4] destptr mem) =>
	(MOVWstorezero destptr mem)
(Zero [5] destptr mem) =>
	(MOVBstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [6] destptr mem) =>
	(MOVHstorezero [4] destptr
		(MOVWstorezero destptr mem))
(Zero [7] destptr mem) =>
	(MOVBstorezero [6] destptr
		(MOVHstorezero [4] destptr
			(MOVWstorezero destptr mem)))

(Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem)
(Zero [12] {t} destptr mem) =>
	(MOVWstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [16] {t} destptr mem) =>
	(MOVDstorezero [8] destptr
		(MOVDstorezero [0] destptr mem))
(Zero [24] {t} destptr mem) =>
	(MOVDstorezero [16] destptr
		(MOVDstorezero [8] destptr
			(MOVDstorezero [0] destptr mem)))
(Zero [32] {t} destptr mem) =>
	(MOVDstorezero [24] destptr
		(MOVDstorezero [16] destptr
			(MOVDstorezero [8] destptr
				(MOVDstorezero [0] destptr mem))))

// Handle cases not handled above
// Lowered Short cases do not generate loops, and as a result don't clobber
// the address registers or flags.
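// "Quad" here refers to quad-word (16-byte) stores, hence the POWER9
// requirement.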
(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) =>
	(MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) =>
	(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are a multiple of 4
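// (ld and std are DS-form instructions: the two low bits of the 16-bit
// displacement field belong to the opcode, so the offset must be 4-aligned.)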
(Move [8] {t} dst src mem) =>
	(MOVDstore dst (MOVDload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBZload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHZload [4] src mem)
		(MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) =>
	(MOVBstore [6] dst (MOVBZload [6] src mem)
		(MOVHstore [4] dst (MOVHZload [4] src mem)
			(MOVWstore dst (MOVWZload src mem) mem)))

// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
	(LoweredMove [s] dst src mem)
(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
	(LoweredQuadMoveShort [s] dst src mem)
(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
	(LoweredQuadMove [s] dst src mem)

// Calls
// Lowering calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Miscellaneous
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) => (LessThan (CMPU idx len))
(IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
(NilCheck ...) => (LoweredNilCheck ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
(ANDconst [-1] x) => x
(ANDconst [0] _) => (MOVDconst [0])
(XORconst [0] x) => x
(ORconst [-1] _) => (MOVDconst [-1])
(ORconst [0] x) => x

// zero-extend of small and => small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y

// sign extend of small-positive and => small-positive-and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant, and regarded as 32-bit it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y

// small and of zero-extend => either zero-extend or small and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
(ANDconst [0xFF] y:(MOVBreg _)) => y
(ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF => y
(ANDconst [0xFFFF] y:(MOVHreg _)) => y

(AND (MOVDconst [c]) y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF => y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
// normal case
(ANDconst [c] (MOV(B|BZ)reg x)) => (ANDconst [c&0xFF] x)
(ANDconst [c] (MOV(H|HZ)reg x)) => (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOV(W|WZ)reg x)) => (ANDconst [c&0xFFFFFFFF] x)

// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))

(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 => (SRWconst [c] x)
(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 => (SRWconst [c] x)
(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 => (SRWconst [c] x)
(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 => (SRAWconst [c] x)
(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 => (SRAWconst [c] x)
(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 => (SRAWconst [c] x)

// initial right shift will handle sign/zero extend
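// e.g. a logical right shift by 56 leaves an 8-bit result, so a following
// byte zero-extension is a no-op; at exactly 56 a sign extension is the
// same as having used an arithmetic shift instead.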
(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)

(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)

(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)

// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) => y  // repeat
(MOVBreg y:(MOVBreg _)) => y // repeat
(MOVBreg (MOVBZreg x)) => (MOVBreg x)
(MOVBZreg (MOVBreg x)) => (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) => y // repeat
(MOVHZreg y:(MOVBZreg _)) => y // wide of narrow
(MOVHZreg y:(MOVHBRload _ _)) => y

(MOVHreg y:(MOVHreg _)) => y // repeat
(MOVHreg y:(MOVBreg _)) => y // wide of narrow

(MOVHreg y:(MOVHZreg x)) => (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) => (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) => y // repeat
(MOVWZreg y:(MOVHZreg _)) => y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) => y // wide of narrow
(MOVWZreg y:(MOVHBRload _ _)) => y
(MOVWZreg y:(MOVWBRload _ _)) => y

(MOVWreg y:(MOVWreg _)) => y // repeat
(MOVWreg y:(MOVHreg _)) => y // wide of narrow
(MOVWreg y:(MOVBreg _)) => y // wide of narrow

(MOVWreg y:(MOVWZreg x)) => (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) => (MOVWZreg x)

// Truncate then logical then truncate: omit first, lesser or equal truncate
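// Zero-extension commutes with bitwise ops, so when the outer truncation
// is no wider than the inner one, the inner MOV*Zreg is redundant.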
(MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
(MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
(MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))

(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
(MOVBZreg z:(AND y (MOVBZload ptr x))) => z
(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
(MOVHZreg z:(AND y (MOVHZload ptr x))) => z
(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
(MOVWZreg z:(AND y (MOVWZload ptr x))) => z

// Arithmetic constant ops

(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
(ADDconst [0] x) => x
(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)

(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
(ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable

(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)

// Subtract from (with carry, but ignored) constant.
// Note, these clobber the carry bit.
(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
(SUBFCconst [0] x) => (NEG x)
(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
(NEG (SUB x y)) => (SUB y x)
(NEG (NEG x)) => x

// Use register moves instead of stores and loads to move int<=>float values
// Common with math Float64bits, Float64frombits
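// e.g. math.Float64bits(f) becomes a single MFVSRD register move instead
// of a store to memory followed by a reload.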
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)

(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)

(MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])

(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)

// Fold address into load/store.
// The assembler needs several instructions and a temp register to access
// a global, and it reloads the temp register each time. So don't fold the
// address of a global unless there is only one use.
   884  (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   885  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   886          (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   887  (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   888  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   889          (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   890  (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   891  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   892          (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   893  (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   894  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   895          (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   896  
   897  (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   898  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   899          (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   900  (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   901  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   902          (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   903  
   904  (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   905  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   906          (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   907  (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   908  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   909          (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   910  (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   911  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   912          (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   913  (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   914  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   915          (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   916  (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   917  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   918          (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   919  (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   920  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   921          (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   922  (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   923  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   924          (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   925  (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   926  	&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
   927          (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   928  
   929  // Fold offsets for loads.
   930  (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVSload [off1+int32(off2)] {sym} ptr mem)
   931  (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVDload [off1+int32(off2)] {sym} ptr mem)
   932  
   933  (MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} x mem)
   934  (MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} x mem)
   935  (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWZload [off1+int32(off2)] {sym} x mem)
   936  (MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} x mem)
   937  (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHZload [off1+int32(off2)] {sym} x mem)
   938  (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVBZload [off1+int32(off2)] {sym} x mem)
   939  
   940  // Fold an ADD address computation into a register indexed load.
   941  (MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
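        // For example (a hypothetical illustration), a variable-index load such as b[i]
        // produces (ADD ptr idx) feeding the load, which becomes a single indexed load
        // (ldx/lbzx and friends) instead of an add followed by a load.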
   942  
   943  // Determine whether there is a benefit to using a non-indexed load, since that
   944  // saves loading the index register. With MOVDload and MOVWload, there is no
   945  // benefit if the offset value is not a multiple of 4, since that results in an
   946  // extra instruction in the base register address computation.
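        // (MOVDload and MOVWload lower to the DS-form instructions ld and lwa, whose
        // 16-bit displacement must be a multiple of 4; other offsets would need an
        // extra instruction to form the address.)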
   947  (MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
   948  (MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   949  (MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
   950  (MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   951  
   952  // Store of zero => storezero
   953  (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
   954  (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
   955  (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
   956  (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
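        // (On ppc64, Go keeps R0 as a zero register, so the stored zero needs no
        // constant materialization.)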
   957  
   958  // Fold offsets for storezero
   959  (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
   960      (MOVDstorezero [off1+int32(off2)] {sym} x mem)
   961  (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
   962      (MOVWstorezero [off1+int32(off2)] {sym} x mem)
   963  (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
   964      (MOVHstorezero [off1+int32(off2)] {sym} x mem)
   965  (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
   966      (MOVBstorezero [off1+int32(off2)] {sym} x mem)
   967  
   968  // Stores with addressing that can be done as indexed stores
   969  (MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
   970  
   971  // Stores with constant index values can be done without indexed instructions.
   972  // Don't lower the MOVD case if c%4 != 0, since its offset would not fit a DS-form instruction.
   973  (MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
   974  (MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   975  (MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
   976  (MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   977  
   978  // Fold symbols into storezero
   979  (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   980  	&& (x.Op != OpSB || p.Uses == 1) =>
   981      (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   982  (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   983  	&& (x.Op != OpSB || p.Uses == 1) =>
   984      (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   985  (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   986  	&& (x.Op != OpSB || p.Uses == 1) =>
   987      (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   988  (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   989  	&& (x.Op != OpSB || p.Uses == 1) =>
   990      (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   991  
   992  // atomic intrinsics
   993  (AtomicLoad(8|32|64|Ptr)  ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
   994  (AtomicLoadAcq(32|64)     ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
   995  
   996  (AtomicStore(8|32|64)    ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
   997  (AtomicStoreRel(32|64)   ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
   999  
  1000  (AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
  1001  
  1002  (AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
  1003  
  1004  (AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
  1005  (AtomicCompareAndSwapRel32   ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
  1006  
  1007  (AtomicAnd8  ...) => (LoweredAtomicAnd8  ...)
  1008  (AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
  1009  (AtomicOr8   ...) => (LoweredAtomicOr8   ...)
  1010  (AtomicOr32  ...) => (LoweredAtomicOr32  ...)
  1011  
  1012  (Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
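        // Slicemask(x) must be 0 when x == 0 and all ones when x > 0; x is a length and
        // therefore non-negative, so the sign bit of -x is set exactly when x > 0 and
        // (NEG x) >> 63 (arithmetic shift) produces the desired mask.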
  1013  
  1014  // Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
  1015  // This may interact with other patterns in the future. (Compare with arm64.)
  1016  (MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
  1017  (MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
  1018  (MOV(H|W)Zreg x:(MOVHZload _ _)) => x
  1019  (MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
  1020  (MOV(H|W)reg x:(MOVHload _ _)) => x
  1021  (MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
  1022  (MOVWZreg x:(MOVWZload _ _)) => x
  1023  (MOVWZreg x:(MOVWZloadidx _ _ _)) => x
  1024  (MOVWreg x:(MOVWload _ _)) => x
  1025  (MOVWreg x:(MOVWloadidx _ _ _)) => x
  1026  (MOVBZreg x:(Select0 (LoweredAtomicLoad8 _ _))) => x
  1027  (MOVWZreg x:(Select0 (LoweredAtomicLoad32 _ _))) => x
  1028  
  1029  // don't extend if argument is already extended
  1030  (MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) => x
  1031  (MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) => x
  1032  (MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) => x
  1033  (MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) => x
  1034  (MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) => x
  1035  (MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) => x
  1036  
  1037  (MOVBZreg (MOVDconst [c]))  => (MOVDconst [int64(uint8(c))])
  1038  (MOVBreg (MOVDconst [c]))  => (MOVDconst [int64(int8(c))])
  1039  (MOVHZreg (MOVDconst [c]))  => (MOVDconst [int64(uint16(c))])
  1040  (MOVHreg (MOVDconst [c]))  => (MOVDconst [int64(int16(c))])
  1041  (MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
  1042  (MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
  1043  
  1044  // Implement the clrlsldi and clrslwi extended mnemonics, as described in
  1045  // ISA 3.0 section C.8. The AuxInt field carries all the values these
  1046  // instructions need (shift amount and mask bounds), packed together since only one field is available.
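        // For example, (SLDconst [3] (MOVBZreg x)) becomes clrlsldi r,x,56,3: clear the
        // leftmost 56 bits, then shift left 3, in a single rotate-and-mask instruction.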
  1047  (SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
  1048  (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
  1049  (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
  1050  
  1051  (SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
  1052  (SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
  1053  (SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
  1054  (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
  1055  (SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
  1056  (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
  1057  // special case for power9
  1058  (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
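        // (extswsli, added in ISA 3.0, sign-extends the low word and shifts left in a
        // single instruction.)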
  1059  
  1060  // Lose widening ops fed to stores: a store writes only the low-order bits, so widening the value first is redundant.
  1061  (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
  1062  (MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
  1063  (MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
  1064  (MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
  1065  (MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
  1066  (MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
  1067  (MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
  1068  (MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
  1069  (MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
  1070  (MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
  1071  (MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore {sym} ptr x mem)
  1072  (MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore {sym} ptr x mem)
  1073  
  1074  // Lose W-widening ops fed to compare-W
  1075  (CMPW x (MOVWreg y)) => (CMPW x y)
  1076  (CMPW (MOVWreg x) y) => (CMPW x y)
  1077  (CMPWU x (MOVWZreg y)) => (CMPWU x y)
  1078  (CMPWU (MOVWZreg x) y) => (CMPWU x y)
  1079  
  1080  (CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
  1081  (CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
  1082  (CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
  1083  (CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
  1084  
  1085  (CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
  1086  (CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
  1087  (CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
  1088  (CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
  1089  
  1090  // Canonicalize the order of arguments to comparisons - helps with CSE.
  1091  ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
  1092  
  1093  // ISEL auxInt values 0=LT 1=GT 2=EQ   arg2 ? arg0 : arg1
  1094  // ISEL auxInt values 4=GE 5=LE 6=NE   !arg2 ? arg1 : arg0
  1095  // ISELB is the special case where the selected values are 0 and 1: it takes the constant 1 and the flags, yielding 1 when the condition holds and 0 otherwise.
  1096  
  1097  (Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
  1098  (NotEqual cmp) => (ISELB [6] (MOVDconst [1]) cmp)
  1099  (LessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
  1100  (FLessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
  1101  (FLessEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
  1102  (GreaterEqual cmp) => (ISELB [4] (MOVDconst [1]) cmp)
  1103  (GreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
  1104  (FGreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
  1105  (FGreaterEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
  1106  (LessEqual cmp) => (ISELB [5] (MOVDconst [1]) cmp)
  1107  
  1108  (ISELB [0] _ (FlagLT)) => (MOVDconst [1])
  1109  (ISELB [0] _ (Flag(GT|EQ))) => (MOVDconst [0])
  1110  (ISELB [1] _ (FlagGT)) => (MOVDconst [1])
  1111  (ISELB [1] _ (Flag(LT|EQ))) => (MOVDconst [0])
  1112  (ISELB [2] _ (FlagEQ)) => (MOVDconst [1])
  1113  (ISELB [2] _ (Flag(LT|GT))) => (MOVDconst [0])
  1114  (ISELB [4] _ (FlagLT)) => (MOVDconst [0])
  1115  (ISELB [4] _ (Flag(GT|EQ))) => (MOVDconst [1])
  1116  (ISELB [5] _ (FlagGT)) => (MOVDconst [0])
  1117  (ISELB [5] _ (Flag(LT|EQ))) => (MOVDconst [1])
  1118  (ISELB [6] _ (FlagEQ)) => (MOVDconst [0])
  1119  (ISELB [6] _ (Flag(LT|GT))) => (MOVDconst [1])
  1120  
  1121  (ISEL [2] x _ (FlagEQ)) => x
  1122  (ISEL [2] _ y (Flag(LT|GT))) => y
  1123  
  1124  (ISEL [6] _ y (FlagEQ)) => y
  1125  (ISEL [6] x _ (Flag(LT|GT))) => x
  1126  
  1127  (ISEL [0] _ y (Flag(EQ|GT))) => y
  1128  (ISEL [0] x _ (FlagLT)) => x
  1129  
  1130  (ISEL [5] _ x (Flag(EQ|LT))) => x
  1131  (ISEL [5] y _ (FlagGT)) => y
  1132  
  1133  (ISEL [1] _ y (Flag(EQ|LT))) => y
  1134  (ISEL [1] x _ (FlagGT)) => x
  1135  
  1136  (ISEL [4] x _ (Flag(EQ|GT))) => x
  1137  (ISEL [4] _ y (FlagLT)) => y
  1138  
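        // InvertFlags swaps the comparison operands, which exchanges LT and GT while
        // leaving EQ unchanged. The aux codes above are arranged so that n%4 == 0 is
        // LT/GE, n%4 == 1 is GT/LE, and n%4 == 2 is EQ/NE, hence the +1, -1, and
        // unchanged cases below.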
  1139  (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool)
  1140  (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool)
  1141  (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool)
  1142  (ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
  1143  (ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
  1144  (ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
  1145  (XORconst [1] (ISELB [6] (MOVDconst [1]) cmp)) => (ISELB [2] (MOVDconst [1]) cmp)
  1146  (XORconst [1] (ISELB [5] (MOVDconst [1]) cmp)) => (ISELB [1] (MOVDconst [1]) cmp)
  1147  (XORconst [1] (ISELB [4] (MOVDconst [1]) cmp)) => (ISELB [0] (MOVDconst [1]) cmp)
  1148  
  1149  // A particular pattern seen in cgo code:
  1150  (AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
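        // The MOVBZload already zero-extends, so only the low 8 bits of the mask can
        // affect the result.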
  1151  
  1152  // floating point negative abs
  1153  (FNEG (FABS x)) => (FNABS x)
  1154  (FNEG (FNABS x)) => (FABS x)
  1155  
  1156  // floating-point fused multiply-add/sub
  1157  (FADD (FMUL x y) z) => (FMADD x y z)
  1158  (FSUB (FMUL x y) z) => (FMSUB x y z)
  1159  (FADDS (FMULS x y) z) => (FMADDS x y z)
  1160  (FSUBS (FMULS x y) z) => (FMSUBS x y z)
  1161  
  1162  
  1163  // The following rules match the multiple single-byte loads or stores found in the
  1164  // encoding/binary functions UintXX (load) and PutUintXX (store) and combine them
  1165  // into the single largest possible load or store.
  1166  // Some are marked big or little endian based on the order in which the bytes are loaded or stored,
  1167  // not on the byte order of the machine. These rules are intended for little endian machines.
  1168  // To implement them for big endian machines, most rules would have to be duplicated but the
  1169  // resulting rule would be reversed, i.e., MOVHZload on little endian would be MOVHBRload on big endian
  1170  // and vice versa.
  1171  // b[0] | b[1]<<8 => load 16-bit Little endian
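        // (As generated for binary.LittleEndian.Uint16.)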
  1172  (OR <t> x0:(MOVBZload [i0] {s} p mem)
  1173  	o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
  1174  	&& !config.BigEndian
  1175  	&& i1 == i0+1
  1176  	&& x0.Uses == 1 && x1.Uses == 1
  1177  	&& o1.Uses == 1
  1178  	&& mergePoint(b, x0, x1) != nil
  1179  	&& clobber(x0, x1, o1)
  1180  	 => @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
  1181  
  1182  // b[0]<<8 | b[1] => load 16-bit Big endian on Little endian arch.
  1183  // Use byte-reverse indexed load for 2 bytes.
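        // (As generated for binary.BigEndian.Uint16 on a little endian target.)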
  1184  (OR <t> x0:(MOVBZload [i1] {s} p mem)
  1185  	o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
  1186  	&& !config.BigEndian
  1187  	&& i1 == i0+1
  1188  	&& x0.Uses == 1 && x1.Uses == 1
  1189  	&& o1.Uses == 1
  1190  	&& mergePoint(b, x0, x1) != nil
  1191  	&& clobber(x0, x1, o1)
  1192  	  => @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1193  
  1194  // b[0]<<(n+8) | b[1]<<n => load 16-bit Big endian (where n%8 == 0)
  1195  // Use byte-reverse indexed load for 2 bytes,
  1196  // then shift left to the correct position. Used to match subrules
  1197  // from longer rules.
  1198  (OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
  1199  	s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
  1200  	&& !config.BigEndian
  1201  	&& i1 == i0+1
  1202  	&& n1%8 == 0
  1203  	&& n2 == n1+8
  1204  	&& x0.Uses == 1 && x1.Uses == 1
  1205  	&& s0.Uses == 1 && s1.Uses == 1
  1206  	&& mergePoint(b, x0, x1) != nil
  1207  	&& clobber(x0, x1, s0, s1)
  1208  	  => @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
  1209  
  1210  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit Little endian
  1211  // Built from the previously merged 16-bit load (MOVHZload) subrule.
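        // (As generated for binary.LittleEndian.Uint32.)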
  1212  (OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
  1213  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
  1214  	x0:(MOVHZload [i0] {s} p mem)))
  1215  	&& !config.BigEndian
  1216  	&& i2 == i0+2
  1217  	&& i3 == i0+3
  1218  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1219  	&& o0.Uses == 1
  1220  	&& s0.Uses == 1 && s1.Uses == 1
  1221  	&& mergePoint(b, x0, x1, x2) != nil
  1222  	&& clobber(x0, x1, x2, s0, s1, o0)
  1223  	 => @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
  1224  
  1225  // b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit Big endian order on Little endian arch
  1226  // Use byte-reverse indexed load for 4 bytes with computed address.
  1227  // Could be used to match subrules of a longer rule.
  1228  (OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
  1229  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
  1230  	x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
  1231  	&& !config.BigEndian
  1232  	&& i1 == i0+1
  1233  	&& i2 == i0+2
  1234  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1235  	&& o0.Uses == 1
  1236  	&& s0.Uses == 1 && s1.Uses == 1
  1237  	&& mergePoint(b, x0, x1, x2) != nil
  1238  	&& clobber(x0, x1, x2, s0, s1, o0)
  1239  	  => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1240  
  1241  // b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit Big endian order on Little endian arch
  1242  // Use byte-reverse indexed load for 4 bytes with computed address.
  1243  // Could be used to match subrules of a longer rule.
  1244  (OR <t> x0:(MOVBZload [i3] {s} p mem)
  1245  	o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
  1246  	s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
  1247  	&& !config.BigEndian
  1248  	&& i2 == i0+2
  1249  	&& i3 == i0+3
  1250  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1251  	&& o0.Uses == 1
  1252  	&& s0.Uses == 1 && s1.Uses == 1
  1253  	&& mergePoint(b, x0, x1, x2) != nil
  1254  	&& clobber(x0, x1, x2, s0, s1, o0)
  1255  	  => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1256  
  1257  // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 => load 32-bit Big endian order on Little endian arch
  1258  // Use byte-reverse indexed load for 4 bytes with computed address.
  1259  // Used to match longer rules.
  1260  (OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
  1261  	o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
  1262  	s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
  1263  	&& !config.BigEndian
  1264  	&& i2 == i0+2
  1265  	&& i3 == i0+3
  1266  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1267  	&& o0.Uses == 1
  1268  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  1269  	&& mergePoint(b, x0, x1, x2) != nil
  1270  	&& clobber(x0, x1, x2, s0, s1, s2, o0)
  1271  	  => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
  1272  
  1273  // b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 32-bit Big endian order on Little endian arch
  1274  // Use byte-reverse indexed load for 4 bytes with computed address.
  1275  // Used to match longer rules.
  1276  (OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
  1277          o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  1278          s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
  1279          && !config.BigEndian
  1280          && i1 == i0+1
  1281          && i2 == i0+2
  1282          && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1283          && o0.Uses == 1
  1284          && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  1285          && mergePoint(b, x0, x1, x2) != nil
  1286          && clobber(x0, x1, x2, s0, s1, s2, o0)
  1287            => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
  1288  
  1289  // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit Little endian
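        // (As generated for binary.LittleEndian.Uint64.)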
  1290  // Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
  1291  // so matching shorter previously defined subrules is important.
  1292  // The offset must be a multiple of 4 for MOVDload.
  1293  (OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
  1294  	o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
  1295  	o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
  1296  	o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
  1297  	x0:(MOVWZload {s} [i0] p mem)))))
  1298  	&& !config.BigEndian
  1299  	&& i4 == i0+4
  1300  	&& i5 == i0+5
  1301  	&& i6 == i0+6
  1302  	&& i7 == i0+7
  1303  	&& x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  1304  	&& o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  1305  	&& s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  1306  	&& mergePoint(b, x0, x4, x5, x6, x7) != nil
  1307  	&& clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
  1308  	  => @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
  1309  
  1310  // b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit Big endian ordered bytes on Little endian arch
  1311  // Use byte-reverse indexed load of 8 bytes.
  1312  // Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
  1313  // so matching shorter previously defined subrules is important.
  1314  (OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
  1315  	o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  1316  	o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
  1317  	o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
  1318  	x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
  1319  	&& !config.BigEndian
  1320  	&& i1 == i0+1
  1321  	&& i2 == i0+2
  1322  	&& i3 == i0+3
  1323  	&& i4 == i0+4
  1324  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
  1325  	&& o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
  1326  	&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1327  	&& mergePoint(b, x0, x1, x2, x3, x4) != nil
  1328  	&& clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
  1329  	  => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1330  
  1331  // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit Big endian ordered bytes on Little endian arch
  1332  // Use byte-reverse indexed load of 8 bytes.
  1333  // Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
  1334  // so matching shorter previously defined subrules is important.
  1335  (OR <t> x7:(MOVBZload [i7] {s} p mem)
  1336  	o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
  1337  	o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
  1338  	o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
  1339  	s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
  1340  	&& !config.BigEndian
  1341  	&& i4 == i0+4
  1342  	&& i5 == i0+5
  1343  	&& i6 == i0+6
  1344  	&& i7 == i0+7
  1345  	&& x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  1346  	&& o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  1347  	&& s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  1348  	&& mergePoint(b, x3, x4, x5, x6, x7) != nil
  1349  	&& clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
  1350  	=> @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
  1351  
  1352  // 2 byte store Little endian as in:
  1353  //      b[0] = byte(v >> 16)
  1354  //      b[1] = byte(v >> 24)
  1355  // Added for use in matching longer rules.
  1356  (MOVBstore [i1] {s} p (SR(W|D)const w [24])
  1357          x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
  1358          && !config.BigEndian
  1359          && x0.Uses == 1
  1360          && i1 == i0+1
  1361          && clobber(x0)
  1362            => (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
  1363  
  1364  // 2 byte store Little endian as in:
  1365  //      b[0] = byte(v)
  1366  //      b[1] = byte(v >> 8)
  1367  (MOVBstore [i1] {s} p (SR(W|D)const w [8])
  1368  	x0:(MOVBstore [i0] {s} p w mem))
  1369  	&& !config.BigEndian
  1370  	&& x0.Uses == 1
  1371  	&& i1 == i0+1
  1372  	&& clobber(x0)
  1373  	  => (MOVHstore [i0] {s} p w mem)
  1374  
  1375  // 4 byte store Little endian as in:
  1376  //     b[0:1] = uint16(v)
  1377  //     b[2:3] = uint16(v >> 16)
  1378  (MOVHstore [i1] {s} p (SR(W|D)const w [16])
  1379  	x0:(MOVHstore [i0] {s} p w mem))
  1380  	&& !config.BigEndian
  1381  	&& x0.Uses == 1
  1382  	&& i1 == i0+2
  1383  	&& clobber(x0)
  1384  	  => (MOVWstore [i0] {s} p w mem)
  1385  
  1386  // 4 byte store Big endian as in:
  1387  //     b[0] = byte(v >> 24)
  1388  //     b[1] = byte(v >> 16)
  1389  //     b[2] = byte(v >> 8)
  1390  //     b[3] = byte(v)
  1391  // Use byte-reverse indexed 4 byte store.
  1392  (MOVBstore [i3] {s} p w
  1393  	x0:(MOVBstore [i2] {s} p (SRWconst w [8])
  1394  	x1:(MOVBstore [i1] {s} p (SRWconst w [16])
  1395  	x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
  1396  	&& !config.BigEndian
  1397  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  1398  	&& i1 == i0+1 && i2 == i0+2 && i3 == i0+3
  1399  	&& clobber(x0, x1, x2)
  1400  	  => (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
  1401  
  1402  // The 2-byte store rule appears after the 4-byte store rule so that the
  1403  // match for the 2-byte store is not attempted first.
  1404  // If the 4-byte store were built on the 2-byte store, there would be
  1405  // variations on the MOVDaddr subrule that would require additional
  1406  // rules to be written.
  1407  
  1408  // 2 byte store Big endian as in:
  1409  //      b[0] = byte(v >> 8)
  1410  //      b[1] = byte(v)
  1411  (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
  1412  	&& !config.BigEndian
  1413  	&& x0.Uses == 1
  1414  	&& i1 == i0+1
  1415  	&& clobber(x0)
  1416  	  => (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
  1417  
  1418  // 8 byte store Little endian as in:
  1419  //	b[0] = byte(v)
  1420  //	b[1] = byte(v >> 8)
  1421  //	b[2] = byte(v >> 16)
  1422  //	b[3] = byte(v >> 24)
  1423  //	b[4] = byte(v >> 32)
  1424  //	b[5] = byte(v >> 40)
  1425  //	b[6] = byte(v >> 48)
  1426  //	b[7] = byte(v >> 56)
  1427  // Built on previously defined rules
  1428  // The offset must be a multiple of 4 for MOVDstore.
  1429  (MOVBstore [i7] {s} p (SRDconst w [56])
  1430  	x0:(MOVBstore [i6] {s} p (SRDconst w [48])
  1431  	x1:(MOVBstore [i5] {s} p (SRDconst w [40])
  1432  	x2:(MOVBstore [i4] {s} p (SRDconst w [32])
  1433  	x3:(MOVWstore [i0] {s} p w mem)))))
  1434  	&& !config.BigEndian
  1435  	&& x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
  1436  	&& i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1437  	&& clobber(x0, x1, x2, x3)
  1438  	  => (MOVDstore [i0] {s} p w mem)
  1439  
  1440  // 8 byte store Big endian as in:
  1441  //      b[0] = byte(v >> 56)
  1442  //      b[1] = byte(v >> 48)
  1443  //      b[2] = byte(v >> 40)
  1444  //      b[3] = byte(v >> 32)
  1445  //      b[4] = byte(v >> 24)
  1446  //      b[5] = byte(v >> 16)
  1447  //      b[6] = byte(v >> 8)
  1448  //      b[7] = byte(v)
  1449  // Use byte-reverse indexed 8 byte store.
  1450  (MOVBstore [i7] {s} p w
  1451          x0:(MOVBstore [i6] {s} p (SRDconst w [8])
  1452          x1:(MOVBstore [i5] {s} p (SRDconst w [16])
  1453          x2:(MOVBstore [i4] {s} p (SRDconst w [24])
  1454          x3:(MOVBstore [i3] {s} p (SRDconst w [32])
  1455          x4:(MOVBstore [i2] {s} p (SRDconst w [40])
  1456          x5:(MOVBstore [i1] {s} p (SRDconst w [48])
  1457          x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
  1458          && !config.BigEndian
  1459          && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
  1460          && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  1461          && clobber(x0, x1, x2, x3, x4, x5, x6)
  1462            => (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
  1463  
  1464  // Arch-specific inlining for small or disjoint runtime.memmove
  1465  (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
  1466          && sz >= 0
  1467          && isSameCall(sym, "runtime.memmove")
  1468          && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1469          && isInlinableMemmove(dst, src, sz, config)
  1470          && clobber(s1, s2, s3, call)
  1471          => (Move [sz] dst src mem)
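        // For example (a hypothetical illustration), copy(dst, src) with a small
        // constant length compiles to a runtime.memmove call, which this rule replaces
        // with (Move [sz] dst src mem); later lowering expands the Move into direct
        // loads and stores.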
  1472  
  1473  // Match post-lowering calls, register version.
  1474  (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
  1475          && sz >= 0
  1476          && isSameCall(sym, "runtime.memmove")
  1477          && call.Uses == 1
  1478          && isInlinableMemmove(dst, src, sz, config)
  1479          && clobber(call)
  1480          => (Move [sz] dst src mem)
  1481  
  1482  // Prefetch instructions (the aux value selects the variant: 0 - DCBT; 8 - DCBT stream)
  1483  (PrefetchCache ptr mem)          => (DCBT ptr mem [0])
  1484  (PrefetchCacheStreamed ptr mem)  => (DCBT ptr mem [8])
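        // The aux value supplies the TH field of dcbt: 0 is an ordinary cache touch,
        // while 8 (0b01000) requests data-stream prefetching.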
  1485  
  1486  