...

Text file src/cmd/compile/internal/ssa/_gen/S390X.rules

     1// Copyright 2016 The Go Authors. All rights reserved.
     2// Use of this source code is governed by a BSD-style
     3// license that can be found in the LICENSE file.
     4
     5// Lowering arithmetic
     6(Add(64|Ptr) ...) => (ADD ...)
     7(Add(32|16|8) ...) => (ADDW ...)
     8(Add32F x y) => (Select0 (FADDS x y))
     9(Add64F x y) => (Select0 (FADD x y))
    10
    11(Sub(64|Ptr) ...) => (SUB ...)
    12(Sub(32|16|8) ...) => (SUBW ...)
    13(Sub32F x y) => (Select0 (FSUBS x y))
    14(Sub64F x y) => (Select0 (FSUB x y))
    15
    16(Mul64 ...) => (MULLD ...)
    17(Mul(32|16|8) ...) => (MULLW ...)
    18(Mul32F ...) => (FMULS ...)
    19(Mul64F ...) => (FMUL ...)
    20(Mul64uhilo ...) => (MLGR ...)
    21
    22(Div32F ...) => (FDIVS ...)
    23(Div64F ...) => (FDIV ...)
    24
    25(Div64 x y) => (DIVD x y)
    26(Div64u ...) => (DIVDU ...)
    27// DIVW/DIVWU have a 64-bit dividend and a 32-bit divisor,
    28// so a sign/zero extension of the dividend is required.
    29(Div32  x y) => (DIVW  (MOVWreg x) y)
    30(Div32u x y) => (DIVWU (MOVWZreg x) y)
    31(Div16  x y) => (DIVW  (MOVHreg x) (MOVHreg y))
    32(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y))
    33(Div8   x y) => (DIVW  (MOVBreg x) (MOVBreg y))
    34(Div8u  x y) => (DIVWU (MOVBZreg x) (MOVBZreg y))
    35
    36(Hmul(64|64u) ...) => (MULH(D|DU) ...)
    37(Hmul32  x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
    38(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
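      // A minimal Go sketch of what the Hmul32 lowering above computes
      // (hypothetical helper: extend to 64 bits, multiply, take the high word):
      //
      //     func hmul32(x, y int32) int32 {
      //         return int32((int64(x) * int64(y)) >> 32)
      //     }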
    39
    40(Mod64 x y) => (MODD x y)
    41(Mod64u ...) => (MODDU ...)
    42// MODW/MODWU have a 64-bit dividend and a 32-bit divisor,
    43// so a sign/zero extension of the dividend is required.
    44(Mod32  x y) => (MODW  (MOVWreg x) y)
    45(Mod32u x y) => (MODWU (MOVWZreg x) y)
    46(Mod16  x y) => (MODW  (MOVHreg x) (MOVHreg y))
    47(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y))
    48(Mod8   x y) => (MODW  (MOVBreg x) (MOVBreg y))
    49(Mod8u  x y) => (MODWU (MOVBZreg x) (MOVBZreg y))
    50
    51// (x + y) / 2 with x>=y -> (x - y) / 2 + y
    52(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
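      // A sketch of why the rewrite is overflow-free (assuming, per the
      // comment above, that x >= y holds for Avg64u):
      //
      //     func avg64u(x, y uint64) uint64 {
      //         return (x-y)/2 + y // x-y cannot wrap since x >= y
      //     }
      //
      // Computing (x+y)/2 directly could overflow 64 bits; this form cannot.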
    53
    54(And64 ...) => (AND ...)
    55(And(32|16|8) ...) => (ANDW ...)
    56
    57(Or64 ...) => (OR ...)
    58(Or(32|16|8) ...) => (ORW ...)
    59
    60(Xor64 ...) => (XOR ...)
    61(Xor(32|16|8) ...) => (XORW ...)
    62
    63(Neg64 ...) => (NEG ...)
    64(Neg(32|16|8) ...) => (NEGW ...)
    65(Neg32F ...) => (FNEGS ...)
    66(Neg64F ...) => (FNEG ...)
    67
    68(Com64 ...) => (NOT ...)
    69(Com(32|16|8) ...) => (NOTW ...)
    70(NOT x) => (XOR (MOVDconst [-1]) x)
    71(NOTW x) => (XORWconst [-1] x)
    72
    73// Lowering boolean ops
    74(AndB ...) => (ANDW ...)
    75(OrB ...) => (ORW ...)
    76(Not x) => (XORWconst [1] x)
    77
    78// Lowering pointer arithmetic
    79(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr)
    80(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr)
    81(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
    82
    83(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
    84
    85// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
    86(Ctz64 <t> x) => (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
    87(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
    88(Ctz16 x) => (Ctz64 (Or64 <typ.UInt64> x (MOVDconst [1<<16])))
    89(Ctz8  x) => (Ctz64 (Or64 <typ.UInt64> x (MOVDconst [1<<8])))
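      // A Go sketch of the identity used above: (x-1)&^x has ones in exactly
      // the trailing-zero positions of x, so its bit length is the count of
      // trailing zeros (hypothetical helper; uses math/bits):
      //
      //     func ctz64(x uint64) int {
      //         m := (x - 1) &^ x    // e.g. x = 0b1000 gives m = 0b0111
      //         return bits.Len64(m) // 64 when x == 0, matching Ctz64(0)
      //     }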
    90
    91(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))
    92(BitLen(32|16|8) x) => (BitLen64 (ZeroExt(32|16|8)to64 x))
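      // FLOGR yields the number of leading zero bits (64 for a zero input),
      // so bit length falls out as 64 - FLOGR(x), mirroring the Go identity
      // bits.Len64(x) == 64 - bits.LeadingZeros64(x).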
    93
    94// POPCNT treats the input register as a vector of 8 bytes, producing
    95// a population count for each individual byte. For inputs larger than
    96// a single byte we therefore need to sum the individual bytes produced
    97// by the POPCNT instruction. For example, the following instruction
    98// sequence could be used to calculate the population count of a 4-byte
    99// value:
   100//
   101//     MOVD   $0x12345678, R1 // R1=0x12345678 <-- input
   102//     POPCNT R1, R2          // R2=0x02030404
   103//     SRW    $16, R2, R3     // R3=0x00000203
   104//     ADDW   R2, R3, R4      // R4=0x02030607
   105//     SRW    $8, R4, R5      // R5=0x00020306
   106//     ADDW   R4, R5, R6      // R6=0x0205090d
   107//     MOVBZ  R6, R7          // R7=0x0000000d <-- result is 13
   108//
   109(PopCount8  x) => (POPCNT (MOVBZreg x))
   110(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
   111(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
   112(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
   113
   114// SumBytes{2,4,8} pseudo operations sum the values of the rightmost
   115// 2, 4 or 8 bytes respectively. The result is a single byte; however,
   116// the other bytes might contain junk, so a zero extension is required
   117// if the desired output type is larger than 1 byte.
   118(SumBytes2 x) => (ADDW (SRWconst <typ.UInt8> x [8]) x)
   119(SumBytes4 x) => (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
   120(SumBytes8 x) => (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
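      // A Go sketch of the SumBytes4 pattern (illustrative only; the upper
      // bytes of the intermediate sums hold junk, which is why the PopCount
      // rules wrap the result in MOVBZreg):
      //
      //     func sumBytes4(x uint32) uint8 {
      //         x += x >> 16 // low two bytes now hold pairwise byte sums
      //         x += x >> 8  // low byte now holds the total
      //         return uint8(x)
      //     }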
   121
   122(Bswap64 ...) => (MOVDBR ...)
   123(Bswap32 ...) => (MOVWBR ...)
   124
   125// add with carry
   126(Select0 (Add64carry x y c))
   127  => (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
   128(Select1 (Add64carry x y c))
   129  => (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
   130
   131// subtract with borrow
   132(Select0 (Sub64borrow x y c))
   133  => (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
   134(Select1 (Sub64borrow x y c))
   135  => (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
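      // These rules lower the generic ops behind math/bits.Add64 and
      // math/bits.Sub64:
      //
      //     sum, carryOut := bits.Add64(x, y, carryIn)
      //     diff, borrowOut := bits.Sub64(x, y, borrowIn)
      //
      // (ADDCconst c [-1]) computes c-1, which carries exactly when the 0/1
      // carry-in c is non-zero, converting the value back into a flag for
      // ADDE; likewise (SUBC 0 c) borrows exactly when c is non-zero. The
      // (SUBE 0 0 flags) value is 0 or -1, so the final NEG normalizes the
      // borrow-out to 0 or 1.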
   136
   137// math package intrinsics
   138(Sqrt      ...) => (FSQRT ...)
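      // FIDBR rounds a float64 to an integral value using the rounding mode
      // selected by its immediate; reading the mapping off the rules below:
      // 7 = toward -inf (Floor), 6 = toward +inf (Ceil), 5 = toward zero
      // (Trunc), 4 = nearest ties-to-even (RoundToEven), 1 = nearest
      // ties-away-from-zero (Round).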
   139(Floor       x) => (FIDBR [7] x)
   140(Ceil        x) => (FIDBR [6] x)
   141(Trunc       x) => (FIDBR [5] x)
   142(RoundToEven x) => (FIDBR [4] x)
   143(Round       x) => (FIDBR [1] x)
   144(FMA     x y z) => (FMADD z x y)
   145
   146(Sqrt32    ...) => (FSQRTS ...)
   147
   148(Max(64|32)F ...) => (WFMAX(D|S)B ...)
   149(Min(64|32)F ...) => (WFMIN(D|S)B ...)
   150
   151// Atomic loads and stores.
   152// The SYNC instruction (fast-BCR-serialization) prevents store-load
   153// reordering. Other sequences of memory operations (load-load,
   154// store-store and load-store) are already guaranteed not to be reordered.
   155(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem)
   156(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem))
   157
   158// Store-release doesn't require store-load ordering.
   159(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem)
   160
   161// Atomic adds.
   162(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem))
   163(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem))
   164(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDW val (Select0 <t> tuple))
   165(Select1     (AddTupleFirst32   _ tuple)) => (Select1 tuple)
   166(Select0 <t> (AddTupleFirst64 val tuple)) => (ADD val (Select0 <t> tuple))
   167(Select1     (AddTupleFirst64   _ tuple)) => (Select1 tuple)
   168
   169// Atomic exchanges.
   170(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem)
   171(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem)
   172
   173// Atomic compare and swap.
   174(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem)
   175(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem)
   176
   177// Atomic and: *(*uint8)(ptr) &= val
   178//
   179// Round pointer down to nearest word boundary and pad value with ones before
   180// applying atomic AND operation to target word.
   181//
   182// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3)))
   183//
   184(AtomicAnd8 ptr val mem)
   185  => (LANfloor
   186       ptr
   187       (RLL <typ.UInt32>
   188         (ORWconst <typ.UInt32> val [-1<<8])
   189         (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
   190       mem)
   191
   192// Atomic or: *(*uint8)(ptr) |= val
   193//
   194// Round pointer down to nearest word boundary and pad value with zeros before
   195// applying atomic OR operation to target word.
   196//
   197// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3))
   198//
   199(AtomicOr8  ptr val mem)
   200  => (LAOfloor
   201       ptr
   202       (SLW <typ.UInt32>
   203         (MOVBZreg <typ.UInt32> val)
   204         (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
   205       mem)
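      // A non-atomic Go sketch of the byte-within-word arithmetic shared by
      // the two rules above (illustrative only; s390x is big-endian, so byte
      // ptr&3 of the containing word sits (3<<3)^((ptr&3)<<3) bits from the
      // right):
      //
      //     func and8(p uintptr, val uint8) { // hypothetical, not atomic
      //         word := (*uint32)(unsafe.Pointer(p &^ 3))
      //         shift := (3 << 3) ^ ((p & 3) << 3)
      //         *word &= bits.RotateLeft32(uint32(val)|0xffffff00, int(shift))
      //     }
      //
      // The OR variant instead shifts uint32(val) left by the same amount,
      // padding with zeros rather than ones.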
   206
   207(AtomicAnd32 ...) => (LAN ...)
   208(AtomicOr32  ...) => (LAO ...)
   209
   210// Lowering extension
   211// Note: we always extend to 64 bits even though some ops don't need that many result bits.
   212(SignExt8to(16|32|64) ...) => (MOVBreg ...)
   213(SignExt16to(32|64) ...) => (MOVHreg ...)
   214(SignExt32to64 ...) => (MOVWreg ...)
   215
   216(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
   217(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
   218(ZeroExt32to64 ...) => (MOVWZreg ...)
   219
   220(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
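      // Slicemask is 0 for x == 0 and all ones otherwise: negating any
      // non-zero value sets the sign bit, and the arithmetic right shift by
      // 63 then smears that bit across the whole register.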
   221
   222// Lowering truncation
   223// Because we ignore high parts of registers, truncates are just copies.
   224(Trunc(16|32|64)to8 ...) => (Copy ...)
   225(Trunc(32|64)to16 ...) => (Copy ...)
   226(Trunc64to32 ...) => (Copy ...)
   227
   228// Lowering float <-> int
   229(Cvt32to32F ...) => (CEFBRA ...)
   230(Cvt32to64F ...) => (CDFBRA ...)
   231(Cvt64to32F ...) => (CEGBRA ...)
   232(Cvt64to64F ...) => (CDGBRA ...)
   233
   234(Cvt32Fto32 ...) => (CFEBRA ...)
   235(Cvt32Fto64 ...) => (CGEBRA ...)
   236(Cvt64Fto32 ...) => (CFDBRA ...)
   237(Cvt64Fto64 ...) => (CGDBRA ...)
   238
   239// Lowering float <-> uint
   240(Cvt32Uto32F ...) => (CELFBR ...)
   241(Cvt32Uto64F ...) => (CDLFBR ...)
   242(Cvt64Uto32F ...) => (CELGBR ...)
   243(Cvt64Uto64F ...) => (CDLGBR ...)
   244
   245(Cvt32Fto32U ...) => (CLFEBR ...)
   246(Cvt32Fto64U ...) => (CLGEBR ...)
   247(Cvt64Fto32U ...) => (CLFDBR ...)
   248(Cvt64Fto64U ...) => (CLGDBR ...)
   249
   250// Lowering float32 <-> float64
   251(Cvt32Fto64F ...) => (LDEBR ...)
   252(Cvt64Fto32F ...) => (LEDBR ...)
   253
   254(CvtBoolToUint8 ...) => (Copy ...)
   255
   256(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
   257
   258// Lowering shifts
   259
   260// Lower bounded shifts first. No need to check shift value.
   261(Lsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
   262(Lsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
   263(Lsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
   264(Lsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SLW x y)
   265(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
   266(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
   267(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
   268(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
   269(Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD x y)
   270(Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW x y)
   271(Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
   272(Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
   273
   274// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
   275//   result = shift >= 64 ? 0 : arg << shift
   276(Lsh(64|32|16|8)x64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
   277(Lsh(64|32|16|8)x32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
   278(Lsh(64|32|16|8)x16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
   279(Lsh(64|32|16|8)x8  <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
   280
   281(Rsh(64|32)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
   282(Rsh(64|32)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
   283(Rsh(64|32)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
   284(Rsh(64|32)Ux8  <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
   285
   286(Rsh(16|8)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
   287(Rsh(16|8)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
   288(Rsh(16|8)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
   289(Rsh(16|8)Ux8  <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
   290
   291// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
   292// We implement this by setting the shift value to 63 (all ones) if the shift value is more than 63.
   293//   result = arg >> (shift >= 64 ? 63 : shift)
   294(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst  y [64])))
   295(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
   296(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
   297(Rsh(64|32)x8  x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
   298
   299(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst  y [64])))
   300(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
   301(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
   302(Rsh(16|8)x8  x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
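      // A Go sketch of the semantics implemented above (hypothetical helper):
      //
      //     func rsh64(x int64, s uint64) int64 {
      //         if s >= 64 {
      //             s = 63 // oversized shifts must replicate the sign bit
      //         }
      //         return x >> s
      //     }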
   303
   304// Lowering rotates
   305(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
   306(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
   307(RotateLeft32 ...) => (RLL  ...)
   308(RotateLeft64 ...) => (RLLG ...)
   309
   310// Lowering comparisons
   311(Less64      x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
   312(Less32      x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
   313(Less(16|8)  x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
   314(Less64U     x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
   315(Less32U     x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
   316(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
   317(Less64F     x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
   318(Less32F     x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
   319
   320(Leq64      x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
   321(Leq32      x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
   322(Leq(16|8)  x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
   323(Leq64U     x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
   324(Leq32U     x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
   325(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
   326(Leq64F     x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
   327(Leq32F     x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
   328
   329(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
   330(Eq32       x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
   331(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
   332(Eq64F      x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
   333(Eq32F      x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
   334
   335(Neq(64|Ptr) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
   336(Neq32       x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
   337(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
   338(Neq64F      x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
   339(Neq32F      x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
   340
   341// Lowering loads
   342(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
   343(Load <t> ptr mem) && is32BitInt(t) &&  t.IsSigned() => (MOVWload ptr mem)
   344(Load <t> ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem)
   345(Load <t> ptr mem) && is16BitInt(t) &&  t.IsSigned() => (MOVHload ptr mem)
   346(Load <t> ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem)
   347(Load <t> ptr mem) && is8BitInt(t)  &&  t.IsSigned() => (MOVBload ptr mem)
   348(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) => (MOVBZload ptr mem)
   349(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
   350(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
   351
   352// Lowering stores
   353(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)
   354(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVSstore ptr val mem)
   355(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
   356(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
   357(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   358(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   359
   360// Lowering moves
   361
   362// Load and store for small copies.
   363(Move [0] _ _ mem) => mem
   364(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
   365(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem)
   366(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem)
   367(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
   368(Move [16] dst src mem) =>
   369	(MOVDstore [8] dst (MOVDload [8] src mem)
   370		(MOVDstore dst (MOVDload src mem) mem))
   371(Move [24] dst src mem) =>
   372	(MOVDstore [16] dst (MOVDload [16] src mem)
   373		(MOVDstore [8] dst (MOVDload [8] src mem)
   374			(MOVDstore dst (MOVDload src mem) mem)))
   375(Move [3] dst src mem) =>
   376	(MOVBstore [2] dst (MOVBZload [2] src mem)
   377		(MOVHstore dst (MOVHZload src mem) mem))
   378(Move [5] dst src mem) =>
   379	(MOVBstore [4] dst (MOVBZload [4] src mem)
   380		(MOVWstore dst (MOVWZload src mem) mem))
   381(Move [6] dst src mem) =>
   382	(MOVHstore [4] dst (MOVHZload [4] src mem)
   383		(MOVWstore dst (MOVWZload src mem) mem))
   384(Move [7] dst src mem) =>
   385	(MOVBstore [6] dst (MOVBZload [6] src mem)
   386		(MOVHstore [4] dst (MOVHZload [4] src mem)
   387			(MOVWstore dst (MOVWZload src mem) mem)))
   388
   389// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
   390(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
   391	(MVC [makeValAndOff(int32(s), 0)] dst src mem)
   392(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
   393	(MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
   394(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
   395	(MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
   396(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
   397	(MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
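      // For example, a 600-byte move becomes three MVCs: 256 bytes at offset
      // 0, 256 bytes at offset 256, and the remaining 88 bytes at offset 512.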
   398
   399// Move more than 1024 bytes using a loop.
   400(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
   401	(LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
   402
   403// Lowering Zero instructions
   404(Zero [0] _ mem) => mem
   405(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
   406(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem)
   407(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
   408(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
   409(Zero [3] destptr mem) =>
   410	(MOVBstoreconst [makeValAndOff(0,2)] destptr
   411		(MOVHstoreconst [0] destptr mem))
   412(Zero [5] destptr mem) =>
   413	(MOVBstoreconst [makeValAndOff(0,4)] destptr
   414		(MOVWstoreconst [0] destptr mem))
   415(Zero [6] destptr mem) =>
   416	(MOVHstoreconst [makeValAndOff(0,4)] destptr
   417		(MOVWstoreconst [0] destptr mem))
   418(Zero [7] destptr mem) =>
   419	(MOVWstoreconst [makeValAndOff(0,3)] destptr
   420		(MOVWstoreconst [0] destptr mem))
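      // Note: the two stores in the Zero [7] rule overlap at byte 3; that is
      // harmless since both write zeros, and it is one instruction shorter
      // than a 4+2+1 byte split.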
   421
   422(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
   423	(CLEAR [makeValAndOff(int32(s), 0)] destptr mem)
   424
   425// Zero more than 1024 bytes using a loop.
   426(Zero [s] destptr mem) && s > 1024 =>
   427	(LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
   428
   429// Lowering constants
   430(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
   431(Const(32|64)F ...) => (FMOV(S|D)const ...)
   432(ConstNil) => (MOVDconst [0])
   433(ConstBool [t]) => (MOVDconst [b2i(t)])
   434
   435// Lowering calls
   436(StaticCall ...) => (CALLstatic ...)
   437(ClosureCall ...) => (CALLclosure ...)
   438(InterCall ...) => (CALLinter ...)
   439(TailCall ...) => (CALLtail ...)
   440
   441// Miscellaneous
   442(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
   443(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
   444(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
   445(NilCheck ...) => (LoweredNilCheck ...)
   446(GetG ...) => (LoweredGetG ...)
   447(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   448(GetCallerSP ...) => (LoweredGetCallerSP ...)
   449(GetCallerPC ...) => (LoweredGetCallerPC ...)
   450(Addr {sym} base) => (MOVDaddr {sym} base)
   451(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
   452(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
   453(ITab (Load ptr mem)) => (MOVDload ptr mem)
   454
   455// block rewrites
   456(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
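      // CLIJ {LessOrGreater} ... [0] branches when the zero-extended boolean
      // compares unequal to 0, i.e. exactly when cond is true.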
   457
   458// Write barrier.
   459(WB ...) => (LoweredWB ...)
   460
   461(PanicBounds ...) => (LoweredPanicBoundsRR ...)
   462(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
   463(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
   464(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
   465(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
   466
   467// ***************************
   468// Above: lowering rules
   469// Below: optimizations
   470// ***************************
   471// TODO: Should the optimizations be a separate pass?
   472
   473// Note on removing unnecessary sign/zero extensions:
   474//
   475// After a value is spilled it is restored using a sign- or zero-extension
   476// to register-width as appropriate for its type. For example, a uint8 will
   477// be restored using a MOVBZ (llgc) instruction which will zero extend the
   478// 8-bit value to 64-bits.
   479//
   480// This is a hazard when folding sign- and zero-extensions since we need to
   481// ensure not only that the value in the argument register is correctly
   482// extended but also that it will still be correctly extended if it is
   483// spilled and restored.
   484//
   485// In general this means we need type checks when the RHS of a rule is an
   486// OpCopy (i.e. "(... x:(...) ...) -> x").
   487
   488// Merge double extensions.
   489(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   490(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   491(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
   492
   493// Bypass redundant sign extensions.
   494(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   495(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   496(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   497(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
   498(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
   499(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
   500
   501// Bypass redundant zero extensions.
   502(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   503(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   504(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
   505(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
   506(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
   507(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
   508
   509// Remove zero extensions after zero extending load.
   510// Note: take care that if x is spilled it is restored correctly.
   511(MOV(B|H|W)Zreg x:(MOVBZload    _   _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
   512(MOV(H|W)Zreg   x:(MOVHZload    _   _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
   513(MOVWZreg       x:(MOVWZload    _   _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x
   514
   515// Remove sign extensions after sign extending load.
   516// Note: take care that if x is spilled it is restored correctly.
   517(MOV(B|H|W)reg x:(MOVBload    _   _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
   518(MOV(H|W)reg   x:(MOVHload    _   _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
   519(MOVWreg       x:(MOVWload    _   _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
   520
   521// Remove sign extensions after zero extending load.
   522// These type checks are probably unnecessary but do them anyway just in case.
   523(MOV(H|W)reg x:(MOVBZload    _   _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
   524(MOVWreg     x:(MOVHZload    _   _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
   525
   526// Fold sign and zero extensions into loads.
   527//
   528// Note: The combined instruction must end up in the same block
   529// as the original load. If not, we end up making a value with
   530// memory type live in two different blocks, which can lead to
   531// multiple memory values alive simultaneously.
   532//
   533// Make sure we don't combine these ops if the load has another use.
   534// This prevents a single load from being split into multiple loads
   535// which then might return different values.  See test/atomicload.go.
   536(MOV(B|H|W)Zreg <t> x:(MOV(B|H|W)load [o] {s} p mem))
   537  && x.Uses == 1
   538  && clobber(x)
   539  => @x.Block (MOV(B|H|W)Zload <t> [o] {s} p mem)
   540(MOV(B|H|W)reg <t> x:(MOV(B|H|W)Zload [o] {s} p mem))
   541  && x.Uses == 1
   542  && clobber(x)
   543  => @x.Block (MOV(B|H|W)load <t> [o] {s} p mem)
   544
   545// Remove zero extensions after argument load.
   546(MOVBZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() == 1 => x
   547(MOVHZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 2 => x
   548(MOVWZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 4 => x
   549
   550// Remove sign extensions after argument load.
   551(MOVBreg x:(Arg <t>)) && t.IsSigned() && t.Size() == 1 => x
   552(MOVHreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 2 => x
   553(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x
   554
   555// Fold zero extensions into constants.
   556(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))])
   557(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
   558(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
   559
   560// Fold sign extensions into constants.
   561(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
   562(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
   563(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
   564
   565// Remove zero extension of conditional move.
   566// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering.
   567(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
   568  && int64(uint8(c)) == c
   569  && int64(uint8(d)) == d
   570  && (!x.Type.IsSigned() || x.Type.Size() > 1)
   571  => x
   572
   573// Fold boolean tests into blocks.
   574// Note: this must match If statement lowering.
   575(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
   576  && int32(x) != 0
   577  => (BRC {d} cmp yes no)
   578
   579// Canonicalize BRC condition code mask by removing impossible conditions.
   580// Integer comparisons cannot generate the unordered condition.
   581(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)    _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
   582(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
   583
   584// Compare-and-branch.
   585// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
   586(BRC {c} (CMP   x y) yes no) => (CGRJ  {c&^s390x.Unordered} x y yes no)
   587(BRC {c} (CMPW  x y) yes no) => (CRJ   {c&^s390x.Unordered} x y yes no)
   588(BRC {c} (CMPU  x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no)
   589(BRC {c} (CMPWU x y) yes no) => (CLRJ  {c&^s390x.Unordered} x y yes no)
   590
   591// Compare-and-branch (immediate).
   592// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
   593(BRC {c} (CMPconst   x [y]) yes no) && y == int32( int8(y)) => (CGIJ  {c&^s390x.Unordered} x [ int8(y)] yes no)
   594(BRC {c} (CMPWconst  x [y]) yes no) && y == int32( int8(y)) => (CIJ   {c&^s390x.Unordered} x [ int8(y)] yes no)
   595(BRC {c} (CMPUconst  x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
   596(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ  {c&^s390x.Unordered} x [uint8(y)] yes no)
   597
   598// Absorb immediate into compare-and-branch.
   599(C(R|GR)J  {c} x (MOVDconst [y]) yes no) && is8Bit(y)  => (C(I|GI)J  {c} x [ int8(y)] yes no)
   600(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no)
   601(C(R|GR)J  {c} (MOVDconst [x]) y yes no) && is8Bit(x)  => (C(I|GI)J  {c.ReverseComparison()} y [ int8(x)] yes no)
   602(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no)
   603
   604// Prefer comparison with immediate to compare-and-branch.
   605(CGRJ  {c} x (MOVDconst [y]) yes no) && !is8Bit(y)  && is32Bit(y)  => (BRC {c} (CMPconst   x [int32(y)]) yes no)
   606(CRJ   {c} x (MOVDconst [y]) yes no) && !is8Bit(y)  && is32Bit(y)  => (BRC {c} (CMPWconst  x [int32(y)]) yes no)
   607(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst  x [int32(y)]) yes no)
   608(CLRJ  {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no)
   609(CGRJ  {c} (MOVDconst [x]) y yes no) && !is8Bit(x)  && is32Bit(x)  => (BRC {c.ReverseComparison()} (CMPconst   y [int32(x)]) yes no)
   610(CRJ   {c} (MOVDconst [x]) y yes no) && !is8Bit(x)  && is32Bit(x)  => (BRC {c.ReverseComparison()} (CMPWconst  y [int32(x)]) yes no)
   611(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst  y [int32(x)]) yes no)
   612(CLRJ  {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
   613
   614// Absorb sign/zero extensions into 32-bit compare-and-branch.
   615(CIJ  {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ  {c} x [y] yes no)
   616(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no)
   617
   618// Bring out-of-range signed immediates into range by varying branch condition.
   619(BRC {s390x.Less}           (CMPconst  x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual}    x [ 127] yes no)
   620(BRC {s390x.Less}           (CMPWconst x [ 128]) yes no) => (CIJ  {s390x.LessOrEqual}    x [ 127] yes no)
   621(BRC {s390x.LessOrEqual}    (CMPconst  x [-129]) yes no) => (CGIJ {s390x.Less}           x [-128] yes no)
   622(BRC {s390x.LessOrEqual}    (CMPWconst x [-129]) yes no) => (CIJ  {s390x.Less}           x [-128] yes no)
   623(BRC {s390x.Greater}        (CMPconst  x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
   624(BRC {s390x.Greater}        (CMPWconst x [-129]) yes no) => (CIJ  {s390x.GreaterOrEqual} x [-128] yes no)
   625(BRC {s390x.GreaterOrEqual} (CMPconst  x [ 128]) yes no) => (CGIJ {s390x.Greater}        x [ 127] yes no)
   626(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ  {s390x.Greater}        x [ 127] yes no)
   627
   628// Bring out-of-range unsigned immediates into range by varying branch condition.
   629(BRC {s390x.Less}           (CMP(WU|U)const  x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no)
   630(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const  x [256]) yes no) => (C(L|LG)IJ {s390x.Greater}     x [255] yes no)
   631
   632// Bring out-of-range immediates into range by switching signedness (only == and !=).
   633(BRC {c} (CMPconst   x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no)
   634(BRC {c} (CMPWconst  x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ  {c} x [uint8(y)] yes no)
   635(BRC {c} (CMPUconst  x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ  {c} x [ int8(y)] yes no)
   636(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ   {c} x [ int8(y)] yes no)
   637
   638// Fold constants into instructions.
   639(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [int32(c)] x)
   640(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)
   641
   642(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
   643(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst <v.Type> x [int32(c)]))
   644(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)])
   645(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst <v.Type> x [int32(c)]))
   646
   647(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x)
   648(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x)
   649
   650// NILF instructions leave the high 32 bits unchanged, which is
   651// equivalent to ANDing with a mask whose leftmost 32 bits are set.
   652// TODO(mundaym): modify the assembler to accept 64-bit values
   653// and use isU32Bit(^c).
   654(AND x (MOVDconst [c]))
   655  && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
   656  => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
   657(AND x (MOVDconst [c]))
   658  && is32Bit(c)
   659  && c < 0
   660  => (ANDconst [c] x)
   661(AND x (MOVDconst [c]))
   662  && is32Bit(c)
   663  && c >= 0
   664  => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
   665
   666(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)
   667
   668((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)
   669
   670((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x)
   671((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)
   672
   673// Constant shifts.
   674(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
   675(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
   676(S(LW|RW)     _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
   677(SRAW         x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])
   678
   679// Shifts only use the rightmost 6 bits of the shift value.
   680(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
   681  && r.Amount == 0
   682  && r.OutMask()&63 == 63
   683  => (S(LD|RD|RAD|LW|RW|RAW) x y)
   684(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
   685  => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
   686(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
   687  => (S(LD|RD|RAD|LW|RW|RAW) x y)
   688(SLD  x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD  x y)
   689(SRD  x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD  x y)
   690(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
   691(SLW  x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW  x y)
   692(SRW  x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW  x y)
   693(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)
   694
   695// Match rotate by constant.
   696(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
   697(RLL  x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])
   698
   699// Signed 64-bit comparison with immediate.
   700(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
   701(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))
   702
   703// Unsigned 64-bit comparison with immediate.
   704(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)])
   705(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)]))
   706
   707// Signed and unsigned 32-bit comparison with immediate.
   708(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
   709(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))
   710
   711// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
   712(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
   713
   714// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
   715(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min(63, int8(63-c+d))), uint8(int8(c-d)&63))})
   716
   717// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
   718(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
   719(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
   720(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})
   721
   722// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
   723(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
   724(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
   725(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})
   726
   727// Absorb shift into 'rotate then insert selected bits [into zero]'.
   728//
   729// Any unsigned shift can be represented as a rotate and mask operation:
   730//
   731//   x << c => RotateLeft64(x, c) & (^uint64(0) << c)
   732//   x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
   733//
   734// Therefore when a shift is used as the input to a rotate then insert
   735// selected bits instruction we can merge the two together. We just have
   736// to be careful that the resultant mask is representable (non-zero and
   737// contiguous). For example, assuming that x is variable and c, y and m
   738// are constants, a shift followed by a rotate then insert selected bits
   739// could be represented as:
   740//
   741//   RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
   742//
   743// We can split the rotation by y into two, one rotate for x and one for
   744// the mask:
   745//
   746//   RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
   747//
   748// The rotations of x by c followed by y can then be combined:
   749//
   750//   RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
   751//   ^^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   752//          rotate                          mask
   753//
   754// To perform this optimization we therefore just need to check that it
   755// is valid to merge the shift mask (^uint64(0)<<c) into the selected
   756// bits mask (i.e. that the resultant mask is non-zero and contiguous).
   757//
   758(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
   759(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
   760
   761// Absorb 'rotate then insert selected bits [into zero]' into left shift.
   762(SLDconst (RISBGZ x {r}) [c])
   763  && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
   764  => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
   765
   766// Absorb 'rotate then insert selected bits [into zero]' into right shift.
   767(SRDconst (RISBGZ x {r}) [c])
   768  && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
   769  => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
   770
   771// Merge 'rotate then insert selected bits [into zero]' instructions together.
   772(RISBGZ (RISBGZ x {y}) {z})
   773  && z.InMerge(y.OutMask()) != nil
   774  => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
   775
   776// Convert RISBGZ into 64-bit shift (helps CSE).
   777(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
   778(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])
   779
   780// Optimize single bit isolation when it is known to be equivalent to
   781// the most significant bit due to mask produced by arithmetic shift.
   782// Simply isolate the most significant bit itself and place it in the
   783// correct position.
   784//
   785// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
   786(RISBGZ (SRADconst x [c]) {r})
   787  && r.Start == r.End           // single bit selected
   788  && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
   789  => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
   790
   791// Canonicalize the order of arguments to comparisons - helps with CSE.
   792((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
   793
   794// Use sign/zero extend instead of RISBGZ.
   795(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
   796(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
   797(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)
   798
   799// Use sign/zero extend instead of ANDW.
   800(ANDWconst [0x00ff] x) => (MOVBZreg x)
   801(ANDWconst [0xffff] x) => (MOVHZreg x)
   802
   803// Strength reduce multiplication to the sum (or difference) of two powers of two.
   804//
   805// Examples:
   806//     5x -> 4x + 1x
   807//    10x -> 8x + 2x
   808//   120x -> 128x - 8x
   809//  -120x -> 8x - 128x
   810//
   811// We know that the rightmost set bit of any value, once isolated, must either
   812// be a power of 2 (because it is a single bit) or 0 (if the original value is 0).
   813// In all of these rules we use a rightmost bit calculation to determine one operand
   814// for the addition or subtraction. We then just need to calculate if the other
   815// operand is a valid power of 2 before we can match the rule.
   816//
   817// Notes:
   818//   - the generic rules have already matched single powers of two so we ignore them here
   819//   - isPowerOfTwo asserts that its argument is greater than 0
   820//   - c&(c-1) = clear rightmost bit
   821//   - c&^(c-1) = isolate rightmost bit
   822
   823// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
   824(MULL(D|W)const <t> x [c]) && isPowerOfTwo(c&(c-1))
   825  => ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
   826                 (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
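      // For example, c = 10: c&(c-1) = 8 = 2³ and c&^(c-1) = 2 = 2¹, so 10x
      // becomes (x<<3) + (x<<1).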
   827
   828// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
   829(MULL(D|W)const <t> x [c]) && isPowerOfTwo(c+(c&^(c-1)))
   830  => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
   831                 (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
   832
   833// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
   834(MULL(D|W)const <t> x [c]) && isPowerOfTwo(-c+(-c&^(-c-1)))
   835  => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
   836                 (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
   837
   838// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
   839(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
   840(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
   841(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx)
   842
   843// Fold ADDconst into MOVDaddridx.
   844(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
   845(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
   846(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
   847
   848// reverse ordering of compare instruction
   849(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp)
   850
   851// replace load from same location as preceding store with copy
   852(MOVDload  [off] {sym} ptr1 (MOVDstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
   853(MOVWload  [off] {sym} ptr1 (MOVWstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x)
   854(MOVHload  [off] {sym} ptr1 (MOVHstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x)
   855(MOVBload  [off] {sym} ptr1 (MOVBstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x)
   856(MOVWZload [off] {sym} ptr1 (MOVWstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x)
   857(MOVHZload [off] {sym} ptr1 (MOVHstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x)
   858(MOVBZload [off] {sym} ptr1 (MOVBstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x)
   859(MOVDload  [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x)
   860(FMOVDload [off] {sym} ptr1 (MOVDstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x)
   861(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
   862(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
   863
   864// prefer FPR <-> GPR moves over combined load ops
   865(MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR <t> y))
   866(ADDload   <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD   x (LGDR <t> y))
   867(SUBload   <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB   x (LGDR <t> y))
   868(ORload    <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR    x (LGDR <t> y))
   869(ANDload   <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND   x (LGDR <t> y))
   870(XORload   <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR   x (LGDR <t> y))
   871
   872// detect attempts to set/clear the sign bit
   873// may need to be reworked when NIHH/OIHH are added
   874(RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
   875(LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
   876(OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
   877(LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
   878
   879// detect attempts to set the sign bit with load
   880(LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
   881
   882// detect copysign
   883(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
   884  && r == s390x.NewRotateParams(0, 0, 0)
   885  => (LGDR (CPSDR <t> y x))
   886(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
   887  && c >= 0
   888  && r == s390x.NewRotateParams(0, 0, 0)
   889  => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
   890(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
   891(CPSDR y (FMOVDconst [c])) && math.Signbit(c)  => (LNDFR y)
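      // In Go terms the patterns above recognize math.Copysign(y, x): CPSDR
      // combines the magnitude of one operand with the sign bit of the
      // other, entirely within floating point registers.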
   892
   893// absorb negations into set/clear sign bit
   894(FNEG  (LPDFR x)) => (LNDFR x)
   895(FNEG  (LNDFR x)) => (LPDFR x)
   896(FNEGS (LPDFR x)) => (LNDFR x)
   897(FNEGS (LNDFR x)) => (LPDFR x)
   898
   899// no need to convert float32 to float64 to set/clear sign bit
   900(LEDBR (LPDFR (LDEBR x))) => (LPDFR x)
   901(LEDBR (LNDFR (LDEBR x))) => (LNDFR x)
   902
   903// remove unnecessary FPR <-> GPR moves
   904(LDGR (LGDR x)) => x
   905(LGDR (LDGR x)) => x
   906
   907// Don't extend before storing
   908(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   909(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   910(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   911(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   912(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   913(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   914
   915// Fold constants into memory operations.
   916// Note that this is not always a good idea because if not all the uses of
   917// the ADDconst get eliminated, we still have to compute the ADDconst and we now
   918// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
   919// Nevertheless, let's do it!
   920(MOVDload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload  [off1+off2] {sym} ptr mem)
   921(MOVWload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload  [off1+off2] {sym} ptr mem)
   922(MOVHload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload  [off1+off2] {sym} ptr mem)
   923(MOVBload   [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload  [off1+off2] {sym} ptr mem)
   924(MOVWZload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem)
   925(MOVHZload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem)
   926(MOVBZload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem)
   927(FMOVSload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem)
   928(FMOVDload  [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem)
   929
   930(MOVDstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore  [off1+off2] {sym} ptr val mem)
   931(MOVWstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore  [off1+off2] {sym} ptr val mem)
   932(MOVHstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore  [off1+off2] {sym} ptr val mem)
   933(MOVBstore  [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore  [off1+off2] {sym} ptr val mem)
   934(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem)
   935(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem)
   936
   937(ADDload   [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload   [off1+off2] {sym} x ptr mem)
   938(ADDWload  [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload  [off1+off2] {sym} x ptr mem)
   939(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem)
   940(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem)
   941(SUBload   [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload   [off1+off2] {sym} x ptr mem)
   942(SUBWload  [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload  [off1+off2] {sym} x ptr mem)
   943
   944(ANDload   [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload   [off1+off2] {sym} x ptr mem)
   945(ANDWload  [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload  [off1+off2] {sym} x ptr mem)
   946(ORload    [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload    [off1+off2] {sym} x ptr mem)
   947(ORWload   [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload   [off1+off2] {sym} x ptr mem)
   948(XORload   [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload   [off1+off2] {sym} x ptr mem)
   949(XORWload  [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload  [off1+off2] {sym} x ptr mem)
   950
   951// Fold constants into stores.
   952(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
   953	(MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
   954(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
   955	(MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
   956(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
   957	(MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
   958(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
   959	(MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
   960
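       // A Go-level illustration (a sketch, not part of the rule set): with
       // these rules a store of a small constant needs no separate constant
       // load, e.g.
       //
       //	func zero(p *int64) {
       //		*p = 0 // MOVDstoreconst -> one store-immediate (MVGHI)
       //	}
       //
       // The value must fit in a signed 16-bit immediate (is16Bit) and the
       // offset in an unsigned 12-bit displacement (isU12Bit); byte stores
       // allow a 20-bit displacement because a long-displacement form (MVIY)
       // exists.
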
   961// Fold address offsets into constant stores.
   962(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
   963	(MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
   964(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
   965	(MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
   966(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
   967	(MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
   968(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) =>
   969	(MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
   970
   971// Merge address calculations into loads and stores.
    972// Offsets from SB must not be merged into unaligned memory accesses because
    973// loads/stores that use PC-relative addressing directly must be aligned to
    974// the size of the target.
   975(MOVDload   [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
   976	(MOVDload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   977(MOVWZload  [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
   978	(MOVWZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   979(MOVHZload  [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
   980	(MOVHZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   981(MOVBZload  [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
   982	(MOVBZload  [off1+off2] {mergeSym(sym1,sym2)} base mem)
   983(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
   984	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   985(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
   986	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   987
   988(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
   989	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   990(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
   991	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   992(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
   993	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
   994
   995(MOVDstore  [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
   996	(MOVDstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   997(MOVWstore  [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
   998	(MOVWstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   999(MOVHstore  [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
  1000	(MOVHstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1001(MOVBstore  [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1002	(MOVBstore  [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1003(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1004	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1005(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
  1006	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
  1007
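       // A sketch of why the alignment check matters (the symbol is made up):
       //
       //	var g uint64 // naturally aligned global
       //
       //	func load() uint64 {
       //		return g // MOVDload {g} SB -> PC-relative LGRL
       //	}
       //
       // Relative-long instructions such as LGRL require their operand to be
       // aligned to the operand size, so a misaligned offset from SB has to
       // stay in a separate address calculation instead of being merged into
       // the load or store.
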
  1008(ADDload   [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload   [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1009(ADDWload  [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload  [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1010(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1011(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1012(SUBload   [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload   [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1013(SUBWload  [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload  [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1014
  1015(ANDload   [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload   [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1016(ANDWload  [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload  [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1017(ORload    [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload    [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1018(ORWload   [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload   [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1019(XORload   [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload   [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1020(XORWload  [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload  [o1+o2] {mergeSym(s1, s2)} x ptr mem)
  1021
  1022// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
  1023(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  1024	(MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1025(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  1026	(MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1027(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  1028	(MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1029(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
  1030	(MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
  1031
   1032// Fold MOVDaddr into MOVDaddridx.
   1033(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
   1034	(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
   1035(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
   1036	(MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
  1037
  1038// Absorb InvertFlags into branches.
  1039(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)
  1040
  1041// Constant comparisons.
  1042(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ)
  1043(CMPconst (MOVDconst [x]) [y]) && x<int64(y) => (FlagLT)
  1044(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT)
  1045(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ)
  1046(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
  1047(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
  1048
  1049(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
  1050(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
  1051(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
  1052(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ)
  1053(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
  1054(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
  1055
  1056(CMP(W|WU)const (MOVBZreg _) [c]) &&   0xff < c => (FlagLT)
  1057(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT)
  1058
  1059(CMPconst  (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
  1060(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
  1061
  1062(CMPUconst  (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n) => (FlagLT)
  1063(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n) => (FlagLT)
  1064
  1065(CMPWconst  (ANDWconst _ [m]) [n]) && int32(m) >= 0 &&  int32(m) <  int32(n) => (FlagLT)
  1066(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)
  1067
  1068(CMPconst  (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
  1069(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)
  1070
  1071// Constant compare-and-branch with immediate.
  1072(CGIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   != 0 &&  int64(x) ==  int64(y) => (First yes no)
  1073(CGIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    != 0 &&  int64(x) <   int64(y) => (First yes no)
  1074(CGIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 &&  int64(x) >   int64(y) => (First yes no)
  1075(CIJ   {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   != 0 &&  int32(x) ==  int32(y) => (First yes no)
  1076(CIJ   {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    != 0 &&  int32(x) <   int32(y) => (First yes no)
  1077(CIJ   {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 &&  int32(x) >   int32(y) => (First yes no)
  1078(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   != 0 && uint64(x) == uint64(y) => (First yes no)
  1079(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    != 0 && uint64(x) <  uint64(y) => (First yes no)
  1080(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) >  uint64(y) => (First yes no)
  1081(CLIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   != 0 && uint32(x) == uint32(y) => (First yes no)
  1082(CLIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    != 0 && uint32(x) <  uint32(y) => (First yes no)
  1083(CLIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) >  uint32(y) => (First yes no)
  1084(CGIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   == 0 &&  int64(x) ==  int64(y) => (First no yes)
  1085(CGIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    == 0 &&  int64(x) <   int64(y) => (First no yes)
  1086(CGIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 &&  int64(x) >   int64(y) => (First no yes)
  1087(CIJ   {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   == 0 &&  int32(x) ==  int32(y) => (First no yes)
  1088(CIJ   {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    == 0 &&  int32(x) <   int32(y) => (First no yes)
  1089(CIJ   {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 &&  int32(x) >   int32(y) => (First no yes)
  1090(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   == 0 && uint64(x) == uint64(y) => (First no yes)
  1091(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    == 0 && uint64(x) <  uint64(y) => (First no yes)
  1092(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) >  uint64(y) => (First no yes)
  1093(CLIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal   == 0 && uint32(x) == uint32(y) => (First no yes)
  1094(CLIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less    == 0 && uint32(x) <  uint32(y) => (First no yes)
  1095(CLIJ  {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) >  uint32(y) => (First no yes)
  1096
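       // The {c} attribute is a four-bit condition mask with one bit each for
       // equal, less, greater and unordered. A simplified Go sketch of the
       // folding above (the real mask values live in cmd/internal/obj/s390x):
       //
       //	const (
       //		equal   = 1 << 3
       //		less    = 1 << 2
       //		greater = 1 << 1
       //	)
       //
       //	func taken(mask uint8, x, y int64) bool {
       //		switch {
       //		case x == y:
       //			return mask&equal != 0
       //		case x < y:
       //			return mask&less != 0
       //		default:
       //			return mask&greater != 0
       //		}
       //	}
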
   1097// Constant compare-and-branch against an immediate of zero when the comparison is unsigned.
  1098(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no)
  1099(C(L|LG)IJ {s390x.Less}           _ [0] yes no) => (First no yes)
  1100
  1101// Constant compare-and-branch when operands match.
  1102(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no)
  1103(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes)
  1104
  1105// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
  1106// to unsigned comparisons.
  1107// Helps simplify constant comparison detection.
  1108(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c])
  1109(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
  1110(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
  1111(CMPconst  (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c])
  1112(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0           => (CMPWUconst x [c])
  1113(CMPconst  x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst  x [n])
  1114(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n])
  1115
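       // Informally: when a 32-bit value is known to be non-negative (for
       // example after masking) the signed and unsigned orderings coincide,
       // so the signed compare can be replaced by its unsigned twin. A sketch:
       //
       //	func sameOrder(x int32) bool {
       //		m := x & 0x7f                       // int32(m) >= 0 for any x
       //		return (m < 10) == (uint32(m) < 10) // always true here
       //	}
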
  1116// Absorb sign and zero extensions into 32-bit comparisons.
  1117(CMP(W|W|WU|WU)      x (MOV(W|WZ|W|WZ)reg y))   => (CMP(W|W|WU|WU) x y)
  1118(CMP(W|W|WU|WU)      (MOV(W|WZ|W|WZ)reg x) y)   => (CMP(W|W|WU|WU) x y)
  1119(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c])
  1120
  1121// Absorb flag constants into branches.
  1122(BRC {c} (FlagEQ) yes no) && c&s390x.Equal     != 0 => (First yes no)
  1123(BRC {c} (FlagLT) yes no) && c&s390x.Less      != 0 => (First yes no)
  1124(BRC {c} (FlagGT) yes no) && c&s390x.Greater   != 0 => (First yes no)
  1125(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no)
  1126
  1127(BRC {c} (FlagEQ) yes no) && c&s390x.Equal     == 0 => (First no yes)
  1128(BRC {c} (FlagLT) yes no) && c&s390x.Less      == 0 => (First no yes)
  1129(BRC {c} (FlagGT) yes no) && c&s390x.Greater   == 0 => (First no yes)
  1130(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes)
  1131
   1132// Absorb flag constants into conditional-move (LOCGR) ops.
  1133(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal     != 0 => x
  1134(LOCGR {c} _ x (FlagLT)) && c&s390x.Less      != 0 => x
  1135(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater   != 0 => x
  1136(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x
  1137
  1138(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal     == 0 => x
  1139(LOCGR {c} x _ (FlagLT)) && c&s390x.Less      == 0 => x
  1140(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater   == 0 => x
  1141(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x
  1142
  1143// Remove redundant *const ops
  1144(ADDconst [0] x) => x
  1145(ADDWconst [c] x) && int32(c)==0 => x
  1146(SUBconst [0] x) => x
  1147(SUBWconst [c] x) && int32(c) == 0 => x
  1148(ANDconst [0] _)                 => (MOVDconst [0])
  1149(ANDWconst [c] _) && int32(c)==0  => (MOVDconst [0])
  1150(ANDconst [-1] x)                => x
  1151(ANDWconst [c] x) && int32(c)==-1 => x
  1152(ORconst [0] x)                  => x
  1153(ORWconst [c] x) && int32(c)==0   => x
  1154(ORconst [-1] _)                 => (MOVDconst [-1])
  1155(ORWconst [c] _) && int32(c)==-1  => (MOVDconst [-1])
  1156(XORconst [0] x)                  => x
  1157(XORWconst [c] x) && int32(c)==0   => x
  1158
  1159// Shifts by zero (may be inserted during multiplication strength reduction).
  1160((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x
  1161
  1162// Convert constant subtracts to constant adds.
  1163(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x)
  1164(SUBWconst [c] x) => (ADDWconst [-int32(c)] x)
  1165
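       // The guard c != -(1<<31) is needed because negating the most negative
       // 32-bit value wraps back to itself, so that one subtract cannot be
       // rewritten as an add:
       //
       //	c := int32(-1 << 31)
       //	_ = -c == c // true: negating math.MinInt32 wraps back to itself
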
  1166// generic constant folding
  1167// TODO: more of this
  1168(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
  1169(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
  1170(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x)
  1171(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x)
  1172(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)])
  1173(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x)
  1174(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
  1175(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)])
  1176(NEG (MOVDconst [c])) => (MOVDconst [-c])
  1177(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))])
  1178(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d])
  1179(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))])
  1180(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
  1181(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
  1182(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d])
  1183(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
  1184(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
  1185(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d])
  1186(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
  1187(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
  1188(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d])
  1189(LoweredRound32F x:(FMOVSconst)) => x
  1190(LoweredRound64F x:(FMOVDconst)) => x
  1191
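       // These folds rely on two's-complement wraparound, which Go's typed
       // arithmetic reproduces exactly. Mirroring the MULLWconst rule:
       //
       //	c, d := int32(1<<16), int64(1<<16)
       //	_ = int64(c * int32(d)) // 0: the product wraps in 32 bits first
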
  1192// generic simplifications
  1193// TODO: more of this
  1194(ADD x (NEG y)) => (SUB x y)
  1195(ADDW x (NEGW y)) => (SUBW x y)
  1196(SUB x (NEG y)) => (ADD x y)
  1197(SUBW x (NEGW y)) => (ADDW x y)
  1198(SUB x x) => (MOVDconst [0])
  1199(SUBW x x) => (MOVDconst [0])
  1200(AND x x) => x
  1201(ANDW x x) => x
  1202(OR x x) => x
  1203(ORW x x) => x
  1204(XOR x x) => (MOVDconst [0])
  1205(XORW x x) => (MOVDconst [0])
  1206(NEG (NEG x)) => x
  1207(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x)
  1208(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
  1209(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
  1210(MOVBreg  (ANDWconst [m] x)) &&  int8(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
  1211(MOVHreg  (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
  1212
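       // For the sign-extension variants the mask must leave the narrow
       // type's sign bit clear; the masked value then has a zero sign bit as
       // well, so sign and zero extension agree. A sketch for the byte case:
       //
       //	func extensionsAgree(x int32) bool {
       //		v := x & 0x3f // int8(0x3f) >= 0, so bit 7 of v is clear
       //		return int32(int8(v)) == int32(uint8(v)) // always true
       //	}
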
  1213// carry flag generation
   1214// (only the cases with no carry out are constant folded)
  1215(Select1 (ADDCconst (MOVDconst [c]) [d]))
  1216  && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
  1217  => (FlagEQ)
  1218(Select1 (ADDCconst (MOVDconst [c]) [d]))
  1219  && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
  1220  => (FlagLT)
  1221
  1222// borrow flag generation
   1223// (only the cases with no borrow are constant folded)
  1224(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
  1225  && uint64(d) <= uint64(c) && c-d == 0
  1226  => (FlagGT)
  1227(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
  1228  && uint64(d) <= uint64(c) && c-d != 0
  1229  => (FlagOV)
  1230
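       // At the Go level the carry and borrow bits are the extra results of
       // math/bits. A sketch of the mapping assumed above, for uint64 values
       // c and d (assuming math/bits is imported):
       //
       //	sum, carry := bits.Add64(c, d, 0)
       //	// carry == 0 && sum == 0  -> FlagEQ
       //	// carry == 0 && sum != 0  -> FlagLT
       //	diff, borrow := bits.Sub64(c, d, 0)
       //	// borrow == 0 && diff == 0 -> FlagGT
       //	// borrow == 0 && diff != 0 -> FlagOV
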
  1231// add with carry
  1232(ADDE x y (FlagEQ)) => (ADDC x y)
  1233(ADDE x y (FlagLT)) => (ADDC x y)
  1234(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)])
  1235(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)])
  1236
  1237// subtract with borrow
  1238(SUBE x y (FlagGT)) => (SUBC x y)
  1239(SUBE x y (FlagOV)) => (SUBC x y)
  1240(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d])
  1241
  1242// collapse carry chain
  1243(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
  1244  => (ADDE x y c)
  1245
  1246// collapse borrow chain
  1247(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
  1248  => (SUBE x y c)
  1249
  1250// branch on carry
  1251(C(G|LG)IJ {s390x.Equal}         (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry)
  1252(C(G|LG)IJ {s390x.Equal}         (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry}   carry)
  1253(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry}   carry)
  1254(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.NoCarry} carry)
  1255(C(G|LG)IJ {s390x.Greater}       (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry}   carry)
  1256
  1257// branch on borrow
  1258(C(G|LG)IJ {s390x.Equal}         (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow)
  1259(C(G|LG)IJ {s390x.Equal}         (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow}   borrow)
  1260(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow}   borrow)
  1261(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow)
  1262(C(G|LG)IJ {s390x.Greater}       (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow}   borrow)
  1263
  1264// fused multiply-add
  1265(Select0 (F(ADD|SUB) (FMUL y z) x)) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
  1266(Select0 (F(ADDS|SUBS) (FMULS y z) x)) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)
  1267
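       // A source-level sketch: math.FMA is intrinsified on s390x, and its
       // operand order matches the rule above, where FMADD x y z computes
       // x + y*z:
       //
       //	func fused(x, y, z float64) float64 {
       //		return math.FMA(y, z, x) // y*z + x -> FMADD
       //	}
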
  1268// Convert floating point comparisons against zero into 'load and test' instructions.
  1269(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x)
  1270(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR <v.Type> x))
  1271
   1272// FSUB, FSUBS, FADD and FADDS produce a condition code representing the
   1273// comparison of the result with 0.0. If a compare-with-zero instruction
   1274// (e.g. LTDBR) follows one of those instructions, we can use the
   1275// generated flag and remove the comparison instruction.
   1276// Note: when inserting Select1 ops we need to ensure they are in the
   1277// same block as their argument. We could also use @x.Block for this,
   1278// but moving the flag-generating value to a different block seems to
   1279// increase the likelihood that the flags value will have to be regenerated
   1280// by flagalloc, which is not what we want.
  1281(LTDBR (Select0 x:(F(ADD|SUB) _ _)))   && b == x.Block => (Select1 x)
  1282(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x)
  1283
   1284// Fold loads into arithmetic and logical operations.
  1285// Exclude global data (SB) because these instructions cannot handle relative addresses.
  1286// TODO(mundaym): indexed versions of these?
  1287((ADD|SUB|MULLD|AND|OR|XOR) <t> x g:(MOVDload [off] {sym} ptr mem))
  1288  && ptr.Op != OpSB
  1289  && is20Bit(int64(off))
  1290  && canMergeLoadClobber(v, g, x)
  1291  && clobber(g)
  1292  => ((ADD|SUB|MULLD|AND|OR|XOR)load <t> [off] {sym} x ptr mem)
  1293((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWload [off] {sym} ptr mem))
  1294  && ptr.Op != OpSB
  1295  && is20Bit(int64(off))
  1296  && canMergeLoadClobber(v, g, x)
  1297  && clobber(g)
  1298  => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
  1299((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWZload [off] {sym} ptr mem))
  1300  && ptr.Op != OpSB
  1301  && is20Bit(int64(off))
  1302  && canMergeLoadClobber(v, g, x)
  1303  && clobber(g)
  1304  => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
  1305
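       // A Go-level sketch: when the loaded value has no other uses, the
       // load folds into the operation itself (for ADDload, an add-from-memory
       // instruction such as AG):
       //
       //	func addFrom(acc int64, p *int64) int64 {
       //		return acc + *p // one combined load-and-add
       //	}
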
  1306// Combine stores into store multiples.
  1307// 32-bit
  1308(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
  1309  && p.Op != OpSB
  1310  && x.Uses == 1
  1311  && is20Bit(int64(i)-4)
  1312  && setPos(v, x.Pos)
  1313  && clobber(x)
  1314  => (STM2 [i-4] {s} p w0 w1 mem)
  1315(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
  1316  && x.Uses == 1
  1317  && is20Bit(int64(i)-8)
  1318  && setPos(v, x.Pos)
  1319  && clobber(x)
  1320  => (STM3 [i-8] {s} p w0 w1 w2 mem)
  1321(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
  1322  && x.Uses == 1
  1323  && is20Bit(int64(i)-12)
  1324  && setPos(v, x.Pos)
  1325  && clobber(x)
  1326  => (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
  1327(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
  1328  && x.Uses == 1
  1329  && is20Bit(int64(i)-8)
  1330  && setPos(v, x.Pos)
  1331  && clobber(x)
  1332  => (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
  1333// 64-bit
  1334(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
  1335  && p.Op != OpSB
  1336  && x.Uses == 1
  1337  && is20Bit(int64(i)-8)
  1338  && setPos(v, x.Pos)
  1339  && clobber(x)
  1340  => (STMG2 [i-8] {s} p w0 w1 mem)
  1341(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
  1342  && x.Uses == 1
  1343  && is20Bit(int64(i)-16)
  1344  && setPos(v, x.Pos)
  1345  && clobber(x)
  1346  => (STMG3 [i-16] {s} p w0 w1 w2 mem)
  1347(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
  1348  && x.Uses == 1
  1349  && is20Bit(int64(i)-24)
  1350  && setPos(v, x.Pos)
  1351  && clobber(x)
  1352  => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
  1353(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
  1354  && x.Uses == 1
  1355  && is20Bit(int64(i)-16)
  1356  && setPos(v, x.Pos)
  1357  && clobber(x)
  1358  => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
  1359
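       // A sketch of source code that can produce such adjacent stores
       // (whether the combine fires depends on how earlier passes lay the
       // stores out, so this is illustrative rather than guaranteed):
       //
       //	func fill(p *[4]uint64, a, b, c, d uint64) {
       //		p[0], p[1], p[2], p[3] = a, b, c, d // candidate for one STMG4
       //	}
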
  1360// Convert 32-bit store multiples into 64-bit stores.
  1361(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem)
  1362
  1363// Fold bit reversal into loads.
  1364(MOVWBR x:(MOVWZload    [off] {sym} ptr     mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRload    [off] {sym} ptr     mem)) // need zero extension?
  1365(MOVWBR x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRloadidx [off] {sym} ptr idx mem)) // need zero extension?
  1366(MOVDBR x:(MOVDload     [off] {sym} ptr     mem)) && x.Uses == 1 => @x.Block (MOVDBRload    [off] {sym} ptr     mem)
  1367(MOVDBR x:(MOVDloadidx  [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx [off] {sym} ptr idx mem)
  1368
  1369// Fold bit reversal into stores.
  1370(MOV(D|W)store    [off] {sym} ptr     r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstore    [off] {sym} ptr     x mem)
  1371(MOV(D|W)storeidx [off] {sym} ptr idx r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstoreidx [off] {sym} ptr idx x mem)
  1372
  1373// Special bswap16 rules
  1374(Bswap16 x:(MOVHZload    [off] {sym} ptr     mem)) => @x.Block (MOVHZreg (MOVHBRload    [off] {sym} ptr     mem))
  1375(Bswap16 x:(MOVHZloadidx [off] {sym} ptr idx mem)) => @x.Block (MOVHZreg (MOVHBRloadidx [off] {sym} ptr idx mem))
  1376(MOVHstore    [off] {sym} ptr     (Bswap16 val) mem) => (MOVHBRstore    [off] {sym} ptr     val mem)
  1377(MOVHstoreidx [off] {sym} ptr idx (Bswap16 val) mem) => (MOVHBRstoreidx [off] {sym} ptr idx val mem)
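
       // s390x is big-endian, so it is the little-endian accesses that need a
       // byte reversal, and these rules let them use the load/store-reversed
       // instructions (e.g. LRVG). A sketch (assuming encoding/binary and
       // math/bits are imported; the exact lowering depends on earlier passes):
       //
       //	func leLoad(b []byte) uint64 {
       //		return binary.LittleEndian.Uint64(b) // MOVDBRload (LRVG)
       //	}
       //
       //	func swapStore(p *uint16, v uint16) {
       //		*p = bits.ReverseBytes16(v) // MOVHstore of Bswap16 -> MOVHBRstore
       //	}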
