...

Text file src/cmd/compile/internal/ssa/_gen/PPC64.rules


     1// Copyright 2016 The Go Authors. All rights reserved.
     2// Use of this source code is governed by a BSD-style
     3// license that can be found in the LICENSE file.
     4
     5// GOPPC64 values indicate power8, power9, etc.
     6// That means the code is compiled for that target,
     7// and will not run on earlier targets.
     8//
     9(Add(Ptr|64|32|16|8) ...) => (ADD ...)
    10(Add64F ...) => (FADD ...)
    11(Add32F ...) => (FADDS ...)
    12
    13(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
    14(Sub32F ...) => (FSUBS ...)
    15(Sub64F ...) => (FSUB ...)
    16
    17(Min(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMINJDP x y)
    18(Max(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMAXJDP x y)
    19
    20// Combine 64-bit integer multiply and add operations
    21(ADD            z  l:(MULLD            x  y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD                        x    y                        z   )
    22(ADD            z  l:(MULLDconst <mt> [x] y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD (MOVDconst <mt> [int64(x)]) y                        z   )
    23(ADDconst <at> [z] l:(MULLD            x  y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD                        x    y (MOVDconst <at> [int64(z)]))
    24(ADDconst <at> [z] l:(MULLDconst <mt> [x] y)) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD (MOVDconst <mt> [int64(x)]) y (MOVDconst <at> [int64(z)]))
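      // Illustration (hedged, not part of the rule set): with GOPPC64 >= 9, a Go
      // function such as
      //
      //	func muladd(x, y, z int64) int64 {
      //		return x*y + z
      //	}
      //
      // is expected to compile to a single MADDLD rather than a MULLD plus an ADD,
      // provided the multiply has no other uses (l.Uses == 1).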
    25
    26(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
    27(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
    28(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
    29(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
    30(Mod64 x y) && buildcfg.GOPPC64 >= 9 => (MODSD x y)
    31(Mod64 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVD x y)))
    32(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
    33(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
    34(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
    35(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
    36(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
    37(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
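      // Illustrative note: the GOPPC64 <= 8 expansions rely on the identity
      // x % y == x - y*(x/y); e.g. x = 7, y = 3 gives 7 - 3*(7/3) = 7 - 3*2 = 1.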
    38
    39// (x + y) / 2 with x>=y => (x - y) / 2 + y, which avoids overflow in the addition
    40(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
    41
    42(Mul64 ...) => (MULLD ...)
    43(Mul(32|16|8) ...) => (MULLW ...)
    44(Select0 (Mul64uhilo x y)) => (MULHDU x y)
    45(Select1 (Mul64uhilo x y)) => (MULLD x y)
    46(Select0 (Mul64uover x y)) => (MULLD x y)
    47(Select1 (Mul64uover x y)) => (SETBCR [2] (CMPconst [0] (MULHDU <x.Type> x y)))
    48
    49(Div64 [false] x y) => (DIVD x y)
    50(Div64u ...) => (DIVDU ...)
    51(Div32 [false] x y) => (DIVW x y)
    52(Div32u ...) => (DIVWU ...)
    53(Div16 [false]  x y) => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    54(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
    55(Div8 x y) => (DIVW  (SignExt8to32 x) (SignExt8to32 y))
    56(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
    57
    58(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
    59
    60(Mul(32|64)F ...) => ((FMULS|FMUL) ...)
    61
    62(Div(32|64)F ...) => ((FDIVS|FDIV) ...)
    63
    64// Lowering float <=> int
    65(Cvt32to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD (SignExt32to64 x)))
    66(Cvt64to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD x))
    67
    68(Cvt32Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
    69(Cvt64Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
    70
    71(Cvt32Fto64F ...) => (Copy ...) // Note: v will have the wrong type for patterns that depend on Float32/Float64
    72(Cvt64Fto32F ...) => (FRSP ...)
    73
    74(CvtBoolToUint8 ...) => (Copy ...)
    75
    76(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
    77
    78(Sqrt ...) => (FSQRT ...)
    79(Sqrt32 ...) => (FSQRTS ...)
    80(Floor ...) => (FFLOOR ...)
    81(Ceil ...) => (FCEIL ...)
    82(Trunc ...) => (FTRUNC ...)
    83(Round ...) => (FROUND ...)
    84(Copysign x y) => (FCPSGN y x)
    85(Abs ...) => (FABS ...)
    86(FMA ...) => (FMADD ...)
    87
    88// Lowering extension
    89// Note: we always extend to 64 bits even though some ops don't need that many result bits.
    90(SignExt8to(16|32|64) ...) => (MOVBreg ...)
    91(SignExt16to(32|64) ...) => (MOVHreg ...)
    92(SignExt32to64 ...) => (MOVWreg ...)
    93
    94(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
    95(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
    96(ZeroExt32to64 ...) => (MOVWZreg ...)
    97
    98(Trunc(16|32|64)to8 <t> x) && t.IsSigned() => (MOVBreg x)
    99(Trunc(16|32|64)to8  x) => (MOVBZreg x)
   100(Trunc(32|64)to16 <t> x) && t.IsSigned() => (MOVHreg x)
   101(Trunc(32|64)to16 x) => (MOVHZreg x)
   102(Trunc64to32 <t> x) && t.IsSigned() => (MOVWreg x)
   103(Trunc64to32 x) => (MOVWZreg x)
   104
   105// Lowering constants
   106(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
   107(Const(32|64)F ...) => (FMOV(S|D)const ...)
   108(ConstNil) => (MOVDconst [0])
   109(ConstBool [t]) => (MOVDconst [b2i(t)])
   110
   111// Carrying addition.
   112(Select0 (Add64carry x y c)) =>            (Select0 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1]))))
   113(Select1 (Add64carry x y c)) => (ADDZEzero (Select1 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1])))))
   114// Fold initial carry bit if 0.
   115(ADDE x y (Select1 <typ.UInt64> (ADDCconst (MOVDconst [0]) [-1]))) => (ADDC x y)
   116// Fold transfer of CA -> GPR -> CA. Note: the value has 2 uses when feeding into a chained Add64carry.
   117(Select1 (ADDCconst n:(ADDZEzero x) [-1])) && n.Uses <= 2 => x
   118(ADDE (MOVDconst [0]) y c) => (ADDZE y c)
   119(ADDC x (MOVDconst [y])) && is16Bit(y) => (ADDCconst [y] x)
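      // Hedged sketch: math/bits.Add64 is intrinsified to Add64carry, so a chained
      // 128-bit add like the following exercises the ADDC/ADDE rules above.
      //
      //	import "math/bits"
      //
      //	func add128(xlo, xhi, ylo, yhi uint64) (lo, hi uint64) {
      //		var c uint64
      //		lo, c = bits.Add64(xlo, ylo, 0) // initial carry 0 -> folds to ADDC
      //		hi, _ = bits.Add64(xhi, yhi, c) // chained carry -> ADDE
      //		return lo, hi
      //	}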
   120
   121// Borrowing subtraction.
   122(Select0 (Sub64borrow x y c)) =>                 (Select0 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))
   123(Select1 (Sub64borrow x y c)) => (NEG (SUBZEzero (Select1 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))))
   124// Fold initial borrow bit if 0.
   125(SUBE x y (Select1 <typ.UInt64> (SUBCconst (MOVDconst [0]) [0]))) => (SUBC x y)
   126// Fold transfer of CA -> GPR -> CA. Note: the value has 2 uses when feeding into a chained Sub64borrow.
   127(Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0])) && n.Uses <= 2 => x
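      // Hedged sketch: math/bits.Sub64 is intrinsified to Sub64borrow; a chained
      // 128-bit subtract exercises the SUBC/SUBE rules above.
      //
      //	func sub128(xlo, xhi, ylo, yhi uint64) (lo, hi uint64) {
      //		var b uint64
      //		lo, b = bits.Sub64(xlo, ylo, 0) // initial borrow 0 -> folds to SUBC
      //		hi, _ = bits.Sub64(xhi, yhi, b) // chained borrow -> SUBE
      //		return lo, hi
      //	}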
   128
   129// Constant folding
   130(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
   131(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
   132(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
   133(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
   134(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
   135
   136// Rotates
   137(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
   138(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
   139(RotateLeft(32|64) ...) => ((ROTLW|ROTL) ...)
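      // For illustration (hedged): a constant rotate written in Go, e.g.
      //
      //	func rot3(x uint32) uint32 {
      //		return bits.RotateLeft32(x, 3)
      //	}
      //
      // lowers to (ROTLW x (MOVDconst [3])) and should then fold to ROTLWconst [3]
      // via the constant rotate rules just below.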
   140
   141// Constant rotate generation
   142(ROTLW  x (MOVDconst [c])) => (ROTLWconst  x [c&31])
   143(ROTL   x (MOVDconst [c])) => (ROTLconst   x [c&63])
   144
   145// Combine rotate and mask operations
   146(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
   147(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
   148(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
   149(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
   150
   151// Note, any rotated word bitmask is still a valid word bitmask.
   152(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
   153(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
   154
   155(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
   156(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
   157(ANDconst [m] (SRDconst x [s])) && mergePPC64AndSrdi(m,s) != 0 => (RLWINM [mergePPC64AndSrdi(m,s)] x)
   158(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
   159(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
   160(AND (MOVDconst [m]) (SRDconst x [s])) && mergePPC64AndSrdi(m,s) != 0 => (RLWINM [mergePPC64AndSrdi(m,s)] x)
   161
   162(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
   163(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
   164(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
   165(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
   166
   167(ANDconst [m] (SLDconst x [s])) && mergePPC64AndSldi(m,s) != 0 => (RLWINM [mergePPC64AndSldi(m,s)] x)
   168(AND (MOVDconst [m]) (SLDconst x [s])) && mergePPC64AndSldi(m,s) != 0 => (RLWINM [mergePPC64AndSldi(m,s)] x)
   169
   170// Merge shift right + shift left and clear left (e.g. for a table lookup)
   171(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
   172(CLRLSLDI [c] (SRDconst [s] x)) && mergePPC64ClrlsldiSrd(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrd(int64(c),s)] x)
   173(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
   174// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF].
   175(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
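      // Hedged Go-level example of the table-lookup shape mentioned above:
      //
      //	var tab [256]byte
      //
      //	func lookup(x uint64) byte {
      //		return tab[(x>>14)&0xFF] // shift + mask expected to merge into one RLWINM
      //	}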
   176
   177// Large constant signed right shift: only the sign bit remains, so shift by width-1.
   178(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
   179(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
   180(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
   181(Rsh8x64  x (MOVDconst [c])) && uint64(c) >= 8  => (SRAWconst (SignExt8to32  x) [63])
   182
   183// constant shifts
   184((Lsh64|Rsh64|Rsh64U)x64  x (MOVDconst [c])) && uint64(c) < 64 => (S(L|RA|R)Dconst x [c])
   185((Lsh32|Rsh32|Rsh32U)x64  x (MOVDconst [c])) && uint64(c) < 32 => (S(L|RA|R)Wconst x [c])
   186((Rsh16|Rsh16U)x64  x (MOVDconst [c])) && uint64(c) < 16 => (SR(AW|W)const ((Sign|Zero)Ext16to32 x) [c])
   187(Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
   188((Rsh8|Rsh8U)x64  x (MOVDconst [c])) && uint64(c) < 8 => (SR(AW|W)const ((Sign|Zero)Ext8to32 x) [c])
   189(Lsh8x64  x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
   190
   191// Lower bounded shifts first. No need to check shift value.
   192(Lsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
   193(Lsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
   194(Lsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
   195(Lsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SLD x y)
   196(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
   197(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
   198(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD (MOVHZreg x) y)
   199(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRD (MOVBZreg x) y)
   200(Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD x y)
   201(Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW x y)
   202(Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD (MOVHreg x) y)
   203(Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SRAD (MOVBreg x) y)
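      // Hedged example of a shift the prove pass can mark as bounded:
      //
      //	func shl(x uint64, s uint) uint64 {
      //		return x << (s & 63) // shiftIsBounded(v) holds, so this lowers to a bare SLD
      //	}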
   204
   205// Unbounded shifts. Go shifts saturate to 0 or -1 when shifting beyond the number of
   206// bits in a type, but PPC64 shifts do not (see the ISA for details).
   207//
   208// Note, y is always non-negative. The AND masks below (e.g. 0xFFC0 for a 16-bit count
   209// against a 64-bit shift width) test whether any bit at or above the width is set, i.e. whether y >= 64.
   210// Note, ISELZ is intentionally not used in lower. Where possible, ISEL is converted to ISELZ in late lower
   211// after all the ISEL folding rules have been exercised.
   212
   213((Rsh64U|Lsh64)x64 <t> x y)  => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPUconst y [64]))
   214((Rsh64U|Lsh64)x32 <t> x y)  => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPWUconst y [64]))
   215((Rsh64U|Lsh64)x16 <t> x y)  => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0xFFC0] y)))
   216((Rsh64U|Lsh64)x8  <t> x y)  => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0x00C0] y)))
   217(Rsh64x(64|32)     <t> x y)  => (ISEL [0] (SRAD    <t> x y) (SRADconst <t> x [63]) (CMP(U|WU)const y [64]))
   218(Rsh64x16          <t> x y)  => (ISEL [2] (SRAD    <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
   219(Rsh64x8           <t> x y)  => (ISEL [2] (SRAD    <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))
   220
   221((Rsh32U|Lsh32)x64 <t> x y)  => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPUconst y [32]))
   222((Rsh32U|Lsh32)x32 <t> x y)  => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPWUconst y [32]))
   223((Rsh32U|Lsh32)x16 <t> x y)  => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0xFFE0] y)))
   224((Rsh32U|Lsh32)x8  <t> x y)  => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0x00E0] y)))
   225(Rsh32x(64|32)     <t> x y)  => (ISEL [0] (SRAW    <t> x y) (SRAWconst <t> x [31]) (CMP(U|WU)const y [32]))
   226(Rsh32x16          <t> x y)  => (ISEL [2] (SRAW    <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
   227(Rsh32x8           <t> x y)  => (ISEL [2] (SRAW    <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))
   228
   229((Rsh16U|Lsh16)x64 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPUconst  y [16]))
   230((Rsh16U|Lsh16)x32 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPWUconst y [16]))
   231((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPconst [0] (ANDconst [0xFFF0] y)))
   232((Rsh16U|Lsh16)x8  <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPconst [0] (ANDconst [0x00F0] y)))
   233(Rsh16x(64|32)     <t> x y) => (ISEL [0] (SRAD     <t> (MOVHreg  x) y) (SRADconst <t>  (MOVHreg x) [15]) (CMP(U|WU)const  y [16]))
   234(Rsh16x16          <t> x y) => (ISEL [2] (SRAD     <t> (MOVHreg  x) y) (SRADconst <t>  (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
   235(Rsh16x8           <t> x y) => (ISEL [2] (SRAD     <t> (MOVHreg  x) y) (SRADconst <t>  (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))
   236
   237((Rsh8U|Lsh8)x64 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPUconst  y [8]))
   238((Rsh8U|Lsh8)x32 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPWUconst y [8]))
   239((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPconst [0] (ANDconst [0xFFF8] y)))
   240((Rsh8U|Lsh8)x8  <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPconst [0] (ANDconst [0x00F8] y)))
   241(Rsh8x(64|32)    <t> x y) => (ISEL [0] (SRAD     <t> (MOVBreg  x) y) (SRADconst <t>  (MOVBreg x) [7]) (CMP(U|WU)const  y [8]))
   242(Rsh8x16         <t> x y) => (ISEL [2] (SRAD     <t> (MOVBreg  x) y) (SRADconst <t>  (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
   243(Rsh8x8          <t> x y) => (ISEL [2] (SRAD     <t> (MOVBreg  x) y) (SRADconst <t>  (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))
   244
   245// Catch bounded shifts in situations like foo<<uint(shift&63) which might not be caught by the prove pass.
   246(CMP(U|WU)const [d] (ANDconst z [c])) && uint64(d) > uint64(c) => (FlagLT)
   247
   248(ORN x (MOVDconst [-1])) => x
   249
   250(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
   251(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
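      // Aside (hedged): c&63 | (c>>6&1*63) clamps a 7-bit shift count into 0..63.
      // Go precedence groups it as (c&63) | (((c>>6)&1)*63), so c=70 yields 6|63 = 63
      // while c=5 yields 5|0 = 5. A count of 63 already replicates the sign bit for
      // SRAD; for SRD/SLD an oversized count should only reach here where the
      // surrounding lowering has already selected the saturated result.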
   252
   253(Addr {sym} base) => (MOVDaddr {sym} [0] base)
   254(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
   255(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
   256(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
   257(MOVDaddr {sym} [n] p:(ADD x y)) && sym == nil && n == 0 => p
   258(MOVDaddr {sym} [n] ptr) && sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) => ptr
   259
   260(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
   261
   262(Ctz64 x) && buildcfg.GOPPC64 <= 8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
   263(Ctz32 x) && buildcfg.GOPPC64 <= 8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
   264(Ctz16 x) && buildcfg.GOPPC64 <= 8 => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
   265(Ctz8  x) && buildcfg.GOPPC64 <= 8 => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
   266
   267(Ctz64 x) && buildcfg.GOPPC64 >= 9 => (CNTTZD x)
   268(Ctz32 x) && buildcfg.GOPPC64 >= 9 => (CNTTZW (MOVWZreg x))
   269(Ctz16 x) && buildcfg.GOPPC64 >= 9 => (CNTTZD (OR <typ.UInt64> x (MOVDconst [1<<16])))
   270(Ctz8  x) && buildcfg.GOPPC64 >= 9 => (CNTTZD (OR <typ.UInt64> x (MOVDconst [1<<8])))
   271
   272(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
   273(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
   274(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
   275
   276(PopCount64 ...) => (POPCNTD ...)
   277(PopCount(32|16|8) x) => (POPCNT(W|W|B) (MOV(W|H|B)Zreg x))
   278
   279(And(64|32|16|8) ...) => (AND ...)
   280(Or(64|32|16|8) ...) => (OR ...)
   281(Xor(64|32|16|8) ...) => (XOR ...)
   282
   283(Neg(64|32|16|8) ...) => (NEG ...)
   284(Neg(64|32)F ...) => (FNEG ...)
   285
   286(Com(64|32|16|8) x) => (NOR x x)
   287
   288// Lowering boolean ops
   289(AndB ...) => (AND ...)
   290(OrB ...) => (OR ...)
   291(Not x) => (XORconst [1] x)
   292
   293// Merge logical operations. (NOR y y computes ^y.)
   294(AND x (NOR y y)) => (ANDN x y)
   295(OR x (NOR y y)) => (ORN x y)
   296
   297// Lowering comparisons
   298(EqB x y)  => (ANDconst [1] (EQV x y))
   299// Choosing sign or zero extension based on operand signedness sets up for sign/zero-extension elision later
   300(Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   301(Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   302(Eq(32|64|Ptr) x y) => (Equal ((CMPW|CMP|CMP) x y))
   303(Eq(32|64)F x y) => (Equal (FCMPU x y))
   304
   305(NeqB ...) => (XOR ...)
   306// Like Eq8 and Eq16, prefer sign extension likely to enable later elision.
   307(Neq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (NotEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   308(Neq(8|16) x y)  => (NotEqual (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   309(Neq(32|64|Ptr) x y) => (NotEqual ((CMPW|CMP|CMP) x y))
   310(Neq(32|64)F x y) => (NotEqual (FCMPU x y))
   311
   312(Less(8|16) x y)  => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   313(Less(32|64) x y) => (LessThan ((CMPW|CMP) x y))
   314(Less(32|64)F x y) => (FLessThan (FCMPU x y))
   315
   316(Less(8|16)U x y)  => (LessThan (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   317(Less(32|64)U x y) => (LessThan ((CMPWU|CMPU) x y))
   318
   319(Leq(8|16) x y)  => (LessEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   320(Leq(32|64) x y) => (LessEqual ((CMPW|CMP) x y))
   321(Leq(32|64)F x y) => (FLessEqual (FCMPU x y))
   322
   323(Leq(8|16)U x y)  => (LessEqual (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   324(Leq(32|64)U x y) => (LessEqual (CMP(WU|U) x y))
   325
   326// Absorb pseudo-ops into blocks.
   327(If (Equal cc) yes no) => (EQ cc yes no)
   328(If (NotEqual cc) yes no) => (NE cc yes no)
   329(If (LessThan cc) yes no) => (LT cc yes no)
   330(If (LessEqual cc) yes no) => (LE cc yes no)
   331(If (GreaterThan cc) yes no) => (GT cc yes no)
   332(If (GreaterEqual cc) yes no) => (GE cc yes no)
   333(If (FLessThan cc) yes no) => (FLT cc yes no)
   334(If (FLessEqual cc) yes no) => (FLE cc yes no)
   335(If (FGreaterThan cc) yes no) => (FGT cc yes no)
   336(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
   337
   338(If cond yes no) => (NE (CMPconst [0] (ANDconst [1] cond)) yes no)
   339
   340// Absorb boolean tests into block
   341(NE (CMPconst [0] (ANDconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
   342(NE (CMPconst [0] (ANDconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
   343
   344// absorb flag constants into branches
   345(EQ (FlagEQ) yes no) => (First yes no)
   346(EQ (FlagLT) yes no) => (First no yes)
   347(EQ (FlagGT) yes no) => (First no yes)
   348
   349(NE (FlagEQ) yes no) => (First no yes)
   350(NE (FlagLT) yes no) => (First yes no)
   351(NE (FlagGT) yes no) => (First yes no)
   352
   353(LT (FlagEQ) yes no) => (First no yes)
   354(LT (FlagLT) yes no) => (First yes no)
   355(LT (FlagGT) yes no) => (First no yes)
   356
   357(LE (FlagEQ) yes no) => (First yes no)
   358(LE (FlagLT) yes no) => (First yes no)
   359(LE (FlagGT) yes no) => (First no yes)
   360
   361(GT (FlagEQ) yes no) => (First no yes)
   362(GT (FlagLT) yes no) => (First no yes)
   363(GT (FlagGT) yes no) => (First yes no)
   364
   365(GE (FlagEQ) yes no) => (First yes no)
   366(GE (FlagLT) yes no) => (First no yes)
   367(GE (FlagGT) yes no) => (First yes no)
   368
   369// absorb InvertFlags into branches
   370(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
   371(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
   372(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
   373(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
   374(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
   375(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
   376
   377// constant comparisons
   378(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
   379(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  => (FlagLT)
   380(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  => (FlagGT)
   381
   382(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
   383(CMPconst (MOVDconst [x]) [y]) && x<y  => (FlagLT)
   384(CMPconst (MOVDconst [x]) [y]) && x>y  => (FlagGT)
   385
   386(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  => (FlagEQ)
   387(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
   388(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
   389
   390(CMPUconst (MOVDconst [x]) [y]) && x==y  => (FlagEQ)
   391(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
   392(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
   393
   394// absorb flag constants into boolean values
   395(Equal (FlagEQ)) => (MOVDconst [1])
   396(Equal (FlagLT)) => (MOVDconst [0])
   397(Equal (FlagGT)) => (MOVDconst [0])
   398
   399(NotEqual (FlagEQ)) => (MOVDconst [0])
   400(NotEqual (FlagLT)) => (MOVDconst [1])
   401(NotEqual (FlagGT)) => (MOVDconst [1])
   402
   403(LessThan (FlagEQ)) => (MOVDconst [0])
   404(LessThan (FlagLT)) => (MOVDconst [1])
   405(LessThan (FlagGT)) => (MOVDconst [0])
   406
   407(LessEqual (FlagEQ)) => (MOVDconst [1])
   408(LessEqual (FlagLT)) => (MOVDconst [1])
   409(LessEqual (FlagGT)) => (MOVDconst [0])
   410
   411(GreaterThan (FlagEQ)) => (MOVDconst [0])
   412(GreaterThan (FlagLT)) => (MOVDconst [0])
   413(GreaterThan (FlagGT)) => (MOVDconst [1])
   414
   415(GreaterEqual (FlagEQ)) => (MOVDconst [1])
   416(GreaterEqual (FlagLT)) => (MOVDconst [0])
   417(GreaterEqual (FlagGT)) => (MOVDconst [1])
   418
   419// absorb InvertFlags into boolean values
   420((Equal|NotEqual|LessThan|GreaterThan|LessEqual|GreaterEqual) (InvertFlags x)) => ((Equal|NotEqual|GreaterThan|LessThan|GreaterEqual|LessEqual) x)
   421
   422
   423// Elide compares of bit tests
   424((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
   425((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
   426((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
   427
   428(CondSelect x y (SETBC [a] cmp))  => (ISEL [a] x y cmp)
   429(CondSelect x y (SETBCR [a] cmp))  => (ISEL [a+4] x y cmp)
   430// Only lower after bool has been lowered (it always should be). This helps ensure the folding below happens reliably.
   431(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
   432// Fold any CR -> GPR -> CR transfers when applying the above rule.
   433(ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
   434(ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp)
   435(ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp)
   436
   437// Lowering loads
   438(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
   439(Load <t> ptr mem) && is32BitInt(t) &&  t.IsSigned() => (MOVWload ptr mem)
   440(Load <t> ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem)
   441(Load <t> ptr mem) && is16BitInt(t) &&  t.IsSigned() => (MOVHload ptr mem)
   442(Load <t> ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem)
   443(Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
   444(Load <t> ptr mem) && is8BitInt(t) &&  t.IsSigned() => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
   445(Load <t> ptr mem) && is8BitInt(t) && !t.IsSigned() => (MOVBZload ptr mem)
   446
   447(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
   448(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
   449
   450(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)
   451(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVSstore ptr val mem)
   452(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
   453(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
   454(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   455(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   456
   457// Using Zero instead of LoweredZero allows the
   458// target address to be folded where possible.
   459(Zero [0] _ mem) => mem
   460(Zero [1] destptr mem) => (MOVBstorezero destptr mem)
   461(Zero [2] destptr mem) =>
   462	(MOVHstorezero destptr mem)
   463(Zero [3] destptr mem) =>
   464	(MOVBstorezero [2] destptr
   465		(MOVHstorezero destptr mem))
   466(Zero [4] destptr mem) =>
   467	(MOVWstorezero destptr mem)
   468(Zero [5] destptr mem) =>
   469	(MOVBstorezero [4] destptr
   470		(MOVWstorezero destptr mem))
   471(Zero [6] destptr mem) =>
   472	(MOVHstorezero [4] destptr
   473		(MOVWstorezero destptr mem))
   474(Zero [7] destptr mem) =>
   475	(MOVBstorezero [6] destptr
   476		(MOVHstorezero [4] destptr
   477			(MOVWstorezero destptr mem)))
   478
   479(Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem)
   480(Zero [12] {t} destptr mem) =>
   481	(MOVWstorezero [8] destptr
   482		(MOVDstorezero [0] destptr mem))
   483(Zero [16] {t} destptr mem) =>
   484	(MOVDstorezero [8] destptr
   485		(MOVDstorezero [0] destptr mem))
   486(Zero [24] {t} destptr mem) =>
   487	(MOVDstorezero [16] destptr
   488		(MOVDstorezero [8] destptr
   489			(MOVDstorezero [0] destptr mem)))
   490(Zero [32] {t} destptr mem) =>
   491	(MOVDstorezero [24] destptr
   492		(MOVDstorezero [16] destptr
   493			(MOVDstorezero [8] destptr
   494				(MOVDstorezero [0] destptr mem))))
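      // Hedged illustration: zeroing a 16-byte value, e.g.
      //
      //	type pair struct{ a, b uint64 }
      //
      //	func zeroPair(p *pair) {
      //		*p = pair{} // Zero [16]: expected to become two MOVDstorezero stores
      //	}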
   495
   496// Handle cases not covered above.
   497// The lowered Short cases do not generate loops, and as a result don't clobber
   498// the address registers or flags.
   499(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
   500(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
   501(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
   502(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
   503
   504// moves
   505(Move [0] _ _ mem) => mem
   506(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
   507(Move [2] dst src mem) =>
   508	(MOVHstore dst (MOVHZload src mem) mem)
   509(Move [4] dst src mem) =>
   510	(MOVWstore dst (MOVWZload src mem) mem)
   511// MOVD loads and stores must have offsets that are a multiple of 4.
   512(Move [8] {t} dst src mem) =>
   513	(MOVDstore dst (MOVDload src mem) mem)
   514(Move [3] dst src mem) =>
   515	(MOVBstore [2] dst (MOVBZload [2] src mem)
   516		(MOVHstore dst (MOVHload src mem) mem))
   517(Move [5] dst src mem) =>
   518	(MOVBstore [4] dst (MOVBZload [4] src mem)
   519		(MOVWstore dst (MOVWZload src mem) mem))
   520(Move [6] dst src mem) =>
   521	(MOVHstore [4] dst (MOVHZload [4] src mem)
   522		(MOVWstore dst (MOVWZload src mem) mem))
   523(Move [7] dst src mem) =>
   524	(MOVBstore [6] dst (MOVBZload [6] src mem)
   525		(MOVHstore [4] dst (MOVHZload [4] src mem)
   526			(MOVWstore dst (MOVWZload src mem) mem)))
   527
   528// Large moves use a loop. Since the address is computed and the
   529// offset is zero, any alignment can be used.
   530(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
   531        (LoweredMove [s] dst src mem)
   532(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
   533        (LoweredQuadMoveShort [s] dst src mem)
   534(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
   535        (LoweredQuadMove [s] dst src mem)
   536
   537// Calls
   538// Lowering calls
   539(StaticCall ...) => (CALLstatic ...)
   540(ClosureCall ...) => (CALLclosure ...)
   541(InterCall ...) => (CALLinter ...)
   542(TailCall ...) => (CALLtail ...)
   543
   544// Miscellaneous
   545(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   546(GetCallerSP ...) => (LoweredGetCallerSP ...)
   547(GetCallerPC ...) => (LoweredGetCallerPC ...)
   548(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
   549(IsInBounds idx len) => (LessThan (CMPU idx len))
   550(IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
   551(NilCheck ...) => (LoweredNilCheck ...)
   552
   553// Write barrier.
   554(WB ...) => (LoweredWB ...)
   555
   556// Publication barrier as intrinsic
   557(PubBarrier ...) => (LoweredPubBarrier ...)
   558
   559(PanicBounds ...) => (LoweredPanicBoundsRR ...)
   560(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
   561(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
   562(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
   563(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
   564
   565// Optimizations
   566// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
   567// so ORconst and XORconst easily expand into a pair.
   568
   569// Include very-large constants in the const-const case.
   570(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
   571(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
   572(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
   573(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
   574(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
   575(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
   576
   577// Discover consts
   578(AND x (MOVDconst [-1])) => x
   579(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
   580(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
   581(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
   582
   583// Simplify consts
   584(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
   585(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
   586(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
   587(ANDconst [-1] x) => x
   588(ANDconst [0] _) => (MOVDconst [0])
   589(XORconst [0] x) => x
   590(ORconst [-1] _) => (MOVDconst [-1])
   591(ORconst [0] x) => x
   592
   593// zero-extend of small and => small and
   594(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
   595(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
   596(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
   597(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
   598
   599// sign extend of small-positive and => small-positive-and
   600(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
   601(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
   602(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
   603(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
   604
   605// small and of zero-extend => either zero-extend or small and
   606(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
   607(ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x)
   608(ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF => y
   609(ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x)
   610
   611(AND (MOVDconst [c]) y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF => y
   612(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
   613// normal case
   614(ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x)
   615(ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x)
   616(ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x)
   617
   618// Eliminate unnecessary sign/zero extend following right shift
   619(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
   620(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
   621(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
   622(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
   623(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
   624(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
   625
   626(MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() <= 32 => (S(R|RA)Wconst [c] x)
   627(MOV(HZ|H)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() <= 16 => (S(R|RA)Wconst [c] x)
   628(MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() == 8 => (S(R|RA)Wconst [c] x)
   629
   630// initial right shift will handle sign/zero extend
   631(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
   632(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
   633(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
   634(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
   635(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
   636(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
   637(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
   638(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
   639
   640(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
   641(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
   642(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
   643(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
   644(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
   645(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
   646(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
   647(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
   648
   649(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
   650(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
   651(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
   652(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
   653
   654// Various redundant zero/sign extension combinations.
   655(MOVBZreg y:(MOVBZreg _)) => y  // repeat
   656(MOVBreg y:(MOVBreg _)) => y // repeat
   657(MOVBreg (MOVBZreg x)) => (MOVBreg x)
   658(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
   659
   660// Catch any remaining rotate+shift cases
   661(MOVBZreg (SRWconst x [s])) && mergePPC64AndSrwi(0xFF,s) != 0 => (RLWINM [mergePPC64AndSrwi(0xFF,s)] x)
   662(MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
   663(MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
   664(MOVWZreg (RLWINM [r] y)) && mergePPC64MovwzregRlwinm(r) != 0 => (RLWINM [mergePPC64MovwzregRlwinm(r)] y)
   665(ANDconst [m] (RLWINM [r] y)) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
   666(SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
   667(RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
   668(RLWINM [r] (ANDconst [a] u)) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
   669// SLWconst is a special case of RLWNM which always zero-extends the result.
   670(SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w)
   671(MOVWZreg w:(SLWconst u)) => w
   672
   673// H (halfword) - there are more combinations than these
   674
   675(MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat
   676(MOVHZreg y:(MOVHBRload _ _)) => y
   677
   678(MOVHreg y:(MOV(H|B)reg _)) => y // repeat
   679
   680(MOV(H|HZ)reg y:(MOV(HZ|H)reg x)) => (MOV(H|HZ)reg x)
   681
   682// W (word) - there are more combinations than these
   683
   684(MOV(WZ|WZ|WZ|W|W|W)reg y:(MOV(WZ|HZ|BZ|W|H|B)reg _)) => y // repeat
   685(MOVWZreg y:(MOV(H|W)BRload _ _)) => y
   686
   687(MOV(W|WZ)reg y:(MOV(WZ|W)reg x)) => (MOV(W|WZ)reg x)
   688
   689// Truncate then logical then truncate: the first truncate is redundant when the final one is of lesser or equal width.
   690(MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
   691(MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
   692(MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
   693(MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   694(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   695(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   696
   697(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
   698(MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
   699(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
   700(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
   701
   702// Arithmetic constant ops
   703
   704(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [c] x)
   705(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
   706(ADDconst [0] x) => x
   707(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
   708
   709(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
   710(ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable
   711
   712(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
   713
   714// Subtract from constant (with carry, which is ignored).
   715// Note, these clobber the carry bit.
   716(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
   717(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
   718(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
   719(SUBFCconst [0] x) => (NEG x)
   720(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
   721(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
   722(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
   723(NEG (SUB x y)) => (SUB y x)
   724(NEG (NEG x)) => x
   725
   726// Use register moves instead of stores and loads to move int<=>float values
   727// Common with math.Float64bits, math.Float64frombits.
   728(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
   729(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
   730
   731(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
   732(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
   733
   734(MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
   735(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
   736
   737(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
   738(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
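      // Hedged example: with the rules above, a bit-pattern conversion such as
      //
      //	import "math"
      //
      //	func toBits(f float64) uint64 {
      //		return math.Float64bits(f) // expected: a single MFVSRD register move
      //	}
      //
      // should avoid a round trip through memory.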
   739
   740// Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx)
   741// or non-indexed (MOV*load or MOV*store) should be used. Indexed instructions
   742// require an extra instruction and register to load the index, so non-indexed is preferred.
   743// Indexed ops generate indexed load or store instructions for all GOPPC64 values.
   744// Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits,
   745// and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops.
   746// On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits,
   747// and support for PC relative addressing must be available if relocation is needed.
   748// On power10, the assembler will determine when to use DS-form or prefixed
   749// instructions for non-indexed ops depending on the value of the offset.
   750//
   751// Fold offsets for stores.
   752(MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
   753
   754(FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
   755
   756// Fold address into load/store.
   757// If power10 with PCRel is not available, then
   758// the assembler needs to generate several instructions and use a
   759// temp register to access a global, and each time it will reload
   760// the temp register. So don't fold the address of a global in that case if there is
   761// more than one use.
   762(MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   763	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   764        (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   765
   766(FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   767	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   768        (FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   769
   770(MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   771	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   772        (MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   773(MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   774	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   775        (MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   776(FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   777	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   778        (FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   779
   780// Fold offsets for loads.
   781(FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
   782
   783(MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
   784
   785// Determine load + addressing that can be done as a register indexed load
   786(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
   787
   788// See comments above concerning selection of indexed vs. non-indexed ops.
   789// These cases don't have relocation.
   790(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
   791(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   792(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
   793(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   794
   795// Store of zero => storezero
   796(MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem)
   797
   798// Fold offsets for storezero
   799(MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) =>
   800    (MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem)
   801
   802// Stores with addressing that can be done as indexed stores
   803(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
   804
   805(MOVDstoreidx ptr (MOVDconst [c]) val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
   806(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   807(MOVDstoreidx (MOVDconst [c]) ptr val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
   808(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   809
   810// Fold symbols into storezero
   811(MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   812	&& ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   813    (MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   814
   815// atomic intrinsics
   816(AtomicLoad(8|32|64|Ptr)  ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
   817(AtomicLoadAcq(32|64)     ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
   818
   819(AtomicStore(8|32|64)    ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
   820(AtomicStoreRel(32|64)   ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
   821
   822(AtomicExchange(8|32|64) ...) => (LoweredAtomicExchange(8|32|64) ...)
   823
   824(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
   825
   826(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
   827(AtomicCompareAndSwapRel32   ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
   828
   829(AtomicAnd(8|32)  ...) => (LoweredAtomicAnd(8|32)  ...)
   830(AtomicOr(8|32)   ...) => (LoweredAtomicOr(8|32)   ...)
   831
   832(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
   833(ANDconst [1] z:(SRADconst [63] x)) && z.Uses == 1  => (SRDconst [63] x)
   834
   835// Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
   836// This may interact with other patterns in the future. (Compare with arm64.)
   837(MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
   838(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
   839(MOV(H|W)Zreg x:(MOVHZload _ _)) => x
   840(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
   841(MOV(H|W)reg x:(MOVHload _ _)) => x
   842(MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
   843(MOV(WZ|W)reg x:(MOV(WZ|W)load _ _)) => x
   844(MOV(WZ|W)reg x:(MOV(WZ|W)loadidx _ _ _)) => x
   845(MOV(B|W)Zreg x:(Select0 (LoweredAtomicLoad(8|32) _ _))) => x
   846
   847// don't extend if argument is already extended
   848(MOVBreg x:(Arg <t>)) && is8BitInt(t) && t.IsSigned() => x
   849(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !t.IsSigned() => x
   850(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && t.IsSigned() => x
   851(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !t.IsSigned() => x
   852(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned() => x
   853(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned() => x
   854
   855(MOVBZreg (MOVDconst [c]))  => (MOVDconst [int64(uint8(c))])
   856(MOVBreg (MOVDconst [c]))  => (MOVDconst [int64(int8(c))])
   857(MOVHZreg (MOVDconst [c]))  => (MOVDconst [int64(uint16(c))])
   858(MOVHreg (MOVDconst [c]))  => (MOVDconst [int64(int16(c))])
   859(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
   860(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
   861
   862// Implement the clrlsldi and clrlslwi extended mnemonics as described in
   863// ISA 3.0 section C.8. The AuxInt field contains the values needed for
   864// the instructions, packed together since only one field is available.
   865(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
   866(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
   867(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
   868
   869(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
   870(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
   871(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
   872(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
   873(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
   874(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
   875// special case for power9
   876(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
   877
   878// Lose widening ops fed to stores
   879(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   880(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   881(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   882(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   883(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   884(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
   885(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
   886(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
   887(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   888(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   889(MOVHBRstore ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore ptr x mem)
   890(MOVWBRstore ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore ptr x mem)
   891
   892// Lose W-widening ops fed to compare-W
   893(CMP(W|WU) x (MOV(W|WZ)reg y)) => (CMP(W|WU) x y)
   894(CMP(W|WU) (MOV(W|WZ)reg x) y) => (CMP(W|WU) x y)
   895
   896(CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
   897(CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
   898(CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
   899(CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
   900
   901(CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
   902(CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
   903(CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
   904(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
   905
   906// Canonicalize the order of arguments to comparisons - helps with CSE.
   907((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
   908
   909// n is always a zero-extended uint16 value, so n & z is always a non-negative 32- or 64-bit value.
   910// Rewrite to a compare against int64(0) so it lowers into ANDCCconst in the latelower pass.
   911(CMP(W|U|WU)const [0] a:(ANDconst [n] z)) => (CMPconst [0] a)
   912
   913// SETBC auxInt values 0=LT 1=GT 2=EQ   Crbit==1 ? 1 : 0
   914// SETBCR auxInt values 0=LT 1=GT 2=EQ   Crbit==1 ? 0 : 1
   915(Equal cmp) => (SETBC [2] cmp)
   916(NotEqual cmp) => (SETBCR [2] cmp)
   917(LessThan cmp) => (SETBC [0] cmp)
   918(FLessThan cmp) => (SETBC [0] cmp)
   919(FLessEqual cmp) => (OR (SETBC [2] cmp) (SETBC [0] cmp))
   920(GreaterEqual cmp) => (SETBCR [0] cmp)
   921(GreaterThan cmp)  => (SETBC [1] cmp)
   922(FGreaterEqual cmp) => (OR (SETBC [2] cmp) (SETBC [1] cmp))
   923(FGreaterThan cmp)  => (SETBC [1] cmp)
   924(LessEqual cmp) => (SETBCR [1] cmp)
   925
   926(SETBC [0] (FlagLT)) => (MOVDconst [1])
   927(SETBC [0] (Flag(GT|EQ))) => (MOVDconst [0])
   928(SETBC [1] (FlagGT)) => (MOVDconst [1])
   929(SETBC [1] (Flag(LT|EQ))) => (MOVDconst [0])
   930(SETBC [2] (FlagEQ)) => (MOVDconst [1])
   931(SETBC [2] (Flag(LT|GT))) => (MOVDconst [0])
   932
   933(SETBCR [0] (FlagLT)) => (MOVDconst [0])
   934(SETBCR [0] (Flag(GT|EQ))) => (MOVDconst [1])
   935(SETBCR [1] (FlagGT)) => (MOVDconst [0])
   936(SETBCR [1] (Flag(LT|EQ))) => (MOVDconst [1])
   937(SETBCR [2] (FlagEQ)) => (MOVDconst [0])
   938(SETBCR [2] (Flag(LT|GT))) => (MOVDconst [1])
   939
   940(SETBC [0] (InvertFlags bool)) => (SETBC [1] bool)
   941(SETBC [1] (InvertFlags bool)) => (SETBC [0] bool)
   942(SETBC [2] (InvertFlags bool)) => (SETBC [2] bool)
   943
   944(SETBCR [0] (InvertFlags bool)) => (SETBCR [1] bool)
   945(SETBCR [1] (InvertFlags bool)) => (SETBCR [0] bool)
   946(SETBCR [2] (InvertFlags bool)) => (SETBCR [2] bool)
   947
   948// ISEL auxInt values 0=LT 1=GT 2=EQ   arg2 ? arg0 : arg1
   949// ISEL auxInt values 4=GE 5=LE 6=NE   !arg2 ? arg1 : arg0
   950
   951(ISEL [2] x _ (FlagEQ)) => x
   952(ISEL [2] _ y (Flag(LT|GT))) => y
   953
   954(ISEL [6] _ y (FlagEQ)) => y
   955(ISEL [6] x _ (Flag(LT|GT))) => x
   956
   957(ISEL [0] _ y (Flag(EQ|GT))) => y
   958(ISEL [0] x _ (FlagLT)) => x
   959
   960(ISEL [5] _ x (Flag(EQ|LT))) => x
   961(ISEL [5] y _ (FlagGT)) => y
   962
   963(ISEL [1] _ y (Flag(EQ|LT))) => y
   964(ISEL [1] x _ (FlagGT)) => x
   965
   966(ISEL [4] x _ (Flag(EQ|GT))) => x
   967(ISEL [4] _ y (FlagLT)) => y
   968
   969(SETBC [n] (InvertFlags bool)) => (SETBCR [n] bool)
   970(SETBCR [n] (InvertFlags bool)) => (SETBC [n] bool)
   971
   972(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
   973(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
   974(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
   975(XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp)
   976(XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp)
   977
   978(SETBC  [2] (CMPconst [0] a:(ANDconst [1] _))) => (XORconst [1] a)
   979(SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) => a
   980
   981// Only use CMPconst for these, in case the AND|OR|XOR result is > 32 bits
   982(SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
   983(SETBCR [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (ANDCC y z )))
   984
   985(SETBC [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ORCC y z )))
   986(SETBCR [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (ORCC y z )))
   987
   988(SETBC [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (XORCC y z )))
   989(SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))
   990
   991// A particular pattern seen in cgo code:
   992(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
   993
   994// floating point negative abs
   995(FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
   996
   997// floating-point fused multiply-add/sub
   998(F(ADD|SUB) (FMUL x y) z) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
   999(F(ADDS|SUBS) (FMULS x y) z) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)
  1000
  1001// Arch-specific inlining for small or disjoint runtime.memmove
  1002(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore  _ src s3:(MOVDstore {t} _ dst mem)))))
  1003        && sz >= 0
  1004        && isSameCall(sym, "runtime.memmove")
  1005        && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  1006        && isInlinableMemmove(dst, src, sz, config)
  1007        && clobber(s1, s2, s3, call)
  1008        => (Move [sz] dst src mem)
  1009
  1010// Match post-lowering calls, register version.
  1011(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
  1012        && sz >= 0
  1013        && isSameCall(sym, "runtime.memmove")
  1014        && call.Uses == 1
  1015        && isInlinableMemmove(dst, src, sz, config)
  1016        && clobber(call)
  1017        => (Move [sz] dst src mem)
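      // Hedged illustration: a small constant-size copy such as
      //
      //	func copy16(dst, src []byte) {
      //		copy(dst[:16], src[:16])
      //	}
      //
      // may reach this pass as a runtime.memmove call with sz = 16; when
      // isInlinableMemmove holds it is rewritten to a plain (Move [16] dst src mem).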
  1018
  1019// Prefetch instructions (TH specified using aux field)
  1020// For DCBT Ra,Rb,TH, a value of TH indicates:
  1021//     0, hint this cache line will be used soon. (PrefetchCache)
  1022//     16, hint this cache line will not be used for long. (PrefetchCacheStreamed)
  1023// See ISA 3.0 Book II 4.3.2 for more detail. https://openpower.foundation/specifications/isa/
  1024(PrefetchCache ptr mem)          => (DCBT ptr mem [0])
  1025(PrefetchCacheStreamed ptr mem)  => (DCBT ptr mem [16])
  1026
  1027// Use byte reverse instructions on Power10
  1028(Bswap(16|32|64) x) && buildcfg.GOPPC64 >= 10 => (BR(H|W|D) x)
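      // Hedged example: on GOPPC64 >= 10,
      //
      //	func rev(x uint32) uint32 {
      //		return bits.ReverseBytes32(x)
      //	}
      //
      // is expected to compile to a single BRW.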
  1029
  1030// Fold bit reversal into loads.
  1031(BR(W|H) x:(MOV(W|H)Zload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1032(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
  1033(BRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1034(BRD x:(MOVDloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx ptr idx mem)
  1035
  1036// Fold bit reversal into stores.
  1037(MOV(D|W|H)store [off] {sym} ptr r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
  1038(MOV(D|W|H)storeidx ptr idx      r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstoreidx ptr idx val mem)
  1039
  1040// GOPPC64<10 rules.
  1041// These Bswap operations should only be introduced by the memcombine pass in places where they can be folded into loads or stores.
  1042(Bswap(32|16) x:(MOV(W|H)Zload [off] {sym} ptr mem)) => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1043(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx      mem)) => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
  1044(Bswap64 x:(MOVDload [off] {sym} ptr mem)) => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1045(Bswap64 x:(MOVDloadidx ptr idx      mem)) => @x.Block (MOVDBRloadidx ptr idx mem)
  1046(MOV(D|W|H)store [off] {sym} ptr (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
  1047(MOV(D|W|H)storeidx ptr idx      (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstoreidx ptr idx val mem)
