// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(32|64)F ...) => (FADD(S|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(32|64)F ...) => (FSUB(S|D) ...)

(Mul64 ...) => (MUL ...)
(Mul(32|16|8) ...) => (MULW ...)
(Mul(32|64)F ...) => (FMUL(S|D) ...)

(Hmul64 ...) => (MULH ...)
(Hmul64u ...) => (UMULH ...)
(Hmul32 x y) => (SRAconst (MULL <typ.Int64> x y) [32])
(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
(Select0 (Mul64uhilo x y)) => (UMULH x y)
(Select1 (Mul64uhilo x y)) => (MUL x y)

(Div64 [false] x y) => (DIV x y)
(Div32 [false] x y) => (DIVW x y)
(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
(Div64u ...) => (UDIV ...)
(Div32u ...) => (UDIVW ...)
(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIVD ...)

(Mod64 x y) => (MOD x y)
(Mod32 x y) => (MODW x y)
(Mod64u ...) => (UMOD ...)
(Mod32u ...) => (UMODW ...)
(Mod(16|8) x y) => (MODW (SignExt(16|8)to32 x) (SignExt(16|8)to32 y))
(Mod(16|8)u x y) => (UMODW (ZeroExt(16|8)to32 x) (ZeroExt(16|8)to32 y))

// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
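// For example, with x=7 and y=3: (7-3)/2 + 3 = 5 = (7+3)/2; since x >= y,
// the subtraction cannot wrap around.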

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// unary ops
(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(32|64)F ...) => (FNEG(S|D) ...)
(Com(64|32|16|8) ...) => (MVN ...)

// math package intrinsics
(Abs ...) => (FABSD ...)
(Sqrt ...) => (FSQRTD ...)
(Ceil ...) => (FRINTPD ...)
(Floor ...) => (FRINTMD ...)
(Round ...) => (FRINTAD ...)
(RoundToEven ...) => (FRINTND ...)
(Trunc ...) => (FRINTZD ...)
(FMA x y z) => (FMADDD z x y)

(Sqrt32 ...) => (FSQRTS ...)

(Min(64|32)F ...) => (FMIN(D|S) ...)
(Max(64|32)F ...) => (FMAX(D|S) ...)
// lowering rotates
// We do rotate detection in the generic rules; if the rules below need to change, check the generic rules first.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft8 <t> x y) => (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft16 <t> x y) => (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))

(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)

(Ctz64 <t> x) => (CLZ (RBIT <t> x))
(Ctz32 <t> x) => (CLZW (RBITW <t> x))
(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
(Ctz8 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
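// The ORed-in guard bit just above the operand width makes the result
// correct for a zero input: e.g. Ctz16(0) counts the trailing zeros of
// 0x10000, yielding 16 as required.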

(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))

// Load args directly into the register class where they will be used.
(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})

// Similarly for stores: if we see a store right after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val)
(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val)

(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))

(Bswap64 ...) => (REV ...)
(Bswap32 ...) => (REVW ...)
(Bswap16 ...) => (REV16W ...)

(BitRev64 ...) => (RBIT ...)
(BitRev32 ...) => (RBITW ...)
(BitRev16 x) => (SRLconst [48] (RBIT <typ.UInt64> x))
(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))

// UMOD is assembled as a UREM instruction, and UREM is in turn expanded into
// UDIV and MSUB instructions. But if there is already an identical UDIV
// instruction just before or after the UREM (as in quo, rem := z/y, z%y),
// the second UDIV becomes redundant. The purpose of this rule is to expose
// that extra UDIV so the CSE pass can remove it.
(UMOD <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
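// That is, x % y is computed as x - (x/y)*y; MSUB a x y computes a - x*y
// (see the madd/msub rules below).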

// 64-bit addition with carry.
(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
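// The carry-in c is a 0/1 value; ADDSconstflags [-1] c computes c-1, which
// sets the carry flag exactly when c is nonzero, recreating the incoming
// carry for ADCS.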

// 64-bit subtraction with borrowing.
(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
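// The borrow-in bo is a 0/1 value; NEGSflags computes 0-bo, which leaves the
// carry flag set (ARM's "no borrow") exactly when bo is zero.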

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XOR (MOVDconst [1]) x)

// shifts
// The hardware instruction uses only the low 6 bits of the shift amount;
// we compare against 64 to preserve Go semantics for large shifts.
// The rules for rotates with non-constant shifts are based on the rules
// below; if the rules below change, please also update the rotate rules.

// Check shiftIsBounded first: if the shift amount is proved to be in range,
// we can emit the shift directly.
// left shift
(Lsh(64|32|16|8)x64 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x32 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x16 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x8 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)

// signed right shift
(Rsh64x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> x y)
(Rsh32x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) y)
(Rsh16x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) y)
(Rsh8x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) y)

// unsigned right shift
(Rsh64Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> x y)
(Rsh32Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt32to64 x) y)
(Rsh16Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt16to64 x) y)
(Rsh8Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt8to64 x) y)

// The shift amount may be out of range; use CMP + CSEL instead.
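// In Go terms, the lowering below implements (a sketch, for the 64-bit case):
//
//	if s < 64 {
//		r = x << s
//	} else {
//		r = 0
//	}
//
// as a compare against 64 plus a conditional select.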
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh64Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh64x(32|16|8) x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh32x64 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

// constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F [val]) => (FMOV(S|D)const [float64(val)])
(ConstNil) => (MOVDconst [0])
(ConstBool [t]) => (MOVDconst [b2i(t)])

(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (SCVTFWS ...)
(Cvt32to64F ...) => (SCVTFWD ...)
(Cvt64to32F ...) => (SCVTFS ...)
(Cvt64to64F ...) => (SCVTFD ...)
(Cvt32Uto32F ...) => (UCVTFWS ...)
(Cvt32Uto64F ...) => (UCVTFWD ...)
(Cvt64Uto32F ...) => (UCVTFS ...)
(Cvt64Uto64F ...) => (UCVTFD ...)
(Cvt32Fto32 ...) => (FCVTZSSW ...)
(Cvt64Fto32 ...) => (FCVTZSDW ...)
(Cvt32Fto64 ...) => (FCVTZSS ...)
(Cvt64Fto64 ...) => (FCVTZSD ...)
(Cvt32Fto32U ...) => (FCVTZUSW ...)
(Cvt64Fto32U ...) => (FCVTZUDW ...)
(Cvt32Fto64U ...) => (FCVTZUS ...)
(Cvt64Fto64U ...) => (FCVTZUD ...)
(Cvt32Fto64F ...) => (FCVTSD ...)
(Cvt64Fto32F ...) => (FCVTDS ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round32F ...) => (LoweredRound32F ...)
(Round64F ...) => (LoweredRound64F ...)

// comparisons
(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) => (Equal (CMPW x y))
(Eq64 x y) => (Equal (CMP x y))
(EqPtr x y) => (Equal (CMP x y))
(Eq32F x y) => (Equal (FCMPS x y))
(Eq64F x y) => (Equal (FCMPD x y))

(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) => (NotEqual (CMPW x y))
(Neq64 x y) => (NotEqual (CMP x y))
(NeqPtr x y) => (NotEqual (CMP x y))
(Neq(32|64)F x y) => (NotEqual (FCMP(S|D) x y))

(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
(Less32 x y) => (LessThan (CMPW x y))
(Less64 x y) => (LessThan (CMP x y))

// Set condition flags for floating-point comparisons "x < y"
// and "x <= y". If either or both of the operands are NaNs,
// all three of (x < y), (x == y) and (x > y) are false, and
// the ARM manual specifies that in this case the FCMP instruction
// sets PSTATE.<N,Z,C,V> to (0, 0, 1, 1).
(Less32F x y) => (LessThanF (FCMPS x y))
(Less64F x y) => (LessThanF (FCMPD x y))
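// For example, Less64F(NaN, y) must be false at the Go level; the lowered
// LessThanF condition treats the unordered flag state (0, 0, 1, 1) as
// "not less".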

// For an unsigned integer x, the following identities are useful when combining branches:
// 0 < x => x != 0
// x <= 0 => x == 0
// x < 1 => x == 0
// 1 <= x => x != 0
(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero)
(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0]))
(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)

(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) => (LessThanU (CMPW x y))
(Less64U x y) => (LessThanU (CMP x y))

(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) => (LessEqual (CMPW x y))
(Leq64 x y) => (LessEqual (CMP x y))

// Refer to the comments for op Less64F above.
(Leq32F x y) => (LessEqualF (FCMPS x y))
(Leq64F x y) => (LessEqualF (FCMPD x y))

(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqualU (CMPW x y))
(Leq64U x y) => (LessEqualU (CMP x y))

// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))

// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
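// TSTWconst [1] tests bit 0 of the boolean (booleans are 0 or 1 here), so
// the synthesized flags read as "not equal" exactly when boolval is true.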

(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [off] ptr)

(Addr {sym} base) => (MOVDaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVDstore [3] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVDstore [5] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVDstore [6] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVDstore [7] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [16] ptr mem) =>
	(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)

(Zero [s] ptr mem) && s > 16 && s < 192 => (LoweredZero [s] ptr mem)
(Zero [s] ptr mem) && s >= 192 => (LoweredZeroLoop [s] ptr mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [11] dst src mem) =>
	(MOVDstore [3] dst (MOVDload [3] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [13] dst src mem) =>
	(MOVDstore [5] dst (MOVDload [5] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [14] dst src mem) =>
	(MOVDstore [6] dst (MOVDload [6] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [15] dst src mem) =>
	(MOVDstore [7] dst (MOVDload [7] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [16] dst src mem) =>
	(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)

(Move [s] dst src mem) && s > 16 && s <= 24 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
(Move [s] dst src mem) && s > 24 && s <= 32 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
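// For sizes that are not a multiple of 16, the pieces overlap: e.g. for
// s=20, the STP covers bytes [0,16) and the trailing MOVDstore covers
// bytes [12,20).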
(Move [s] dst src mem) && s > 32 && s <= 40 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
(Move [s] dst src mem) && s > 40 && s <= 48 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
(Move [s] dst src mem) && s > 48 && s <= 56 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
(Move [s] dst src mem) && s > 56 && s <= 64 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))

(Move [s] dst src mem) && s > 64 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
(Move [s] dst src mem) && s >= 192 && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) => (LessThanU (CMP idx len))
(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(MemEq ...) => (LoweredMemEq ...)

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) => (EQ cc yes no)
(If (NotEqual cc) yes no) => (NE cc yes no)
(If (LessThan cc) yes no) => (LT cc yes no)
(If (LessThanU cc) yes no) => (ULT cc yes no)
(If (LessEqual cc) yes no) => (LE cc yes no)
(If (LessEqualU cc) yes no) => (ULE cc yes no)
(If (GreaterThan cc) yes no) => (GT cc yes no)
(If (GreaterThanU cc) yes no) => (UGT cc yes no)
(If (GreaterEqual cc) yes no) => (GE cc yes no)
(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
(If (LessThanF cc) yes no) => (FLT cc yes no)
(If (LessEqualF cc) yes no) => (FLE cc yes no)
(If (GreaterThanF cc) yes no) => (FGT cc yes no)
(If (GreaterEqualF cc) yes no) => (FGE cc yes no)

(If cond yes no) => (TBNZ [0] cond yes no)

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// atomic intrinsics
// Note: these ops do not accept an offset.
(AtomicLoad8 ...) => (LDARB ...)
(AtomicLoad32 ...) => (LDARW ...)
(AtomicLoad64 ...) => (LDAR ...)
(AtomicLoadPtr ...) => (LDAR ...)

(AtomicStore8 ...) => (STLRB ...)
(AtomicStore32 ...) => (STLRW ...)
(AtomicStore64 ...) => (STLR ...)
(AtomicStorePtrNoWB ...) => (STLR ...)

(AtomicExchange(8|32|64) ...) => (LoweredAtomicExchange(8|32|64) ...)
(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)

(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
(AtomicExchange(8|32|64)Variant ...) => (LoweredAtomicExchange(8|32|64)Variant ...)
(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)

// Return old contents.
(AtomicAnd(64|32|8)value ...) => (LoweredAtomicAnd(64|32|8) ...)
(AtomicOr(64|32|8)value ...) => (LoweredAtomicOr(64|32|8) ...)
(AtomicAnd(64|32|8)valueVariant ...) => (LoweredAtomicAnd(64|32|8)Variant ...)
(AtomicOr(64|32|8)valueVariant ...) => (LoweredAtomicOr(64|32|8)Variant ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier (0xe is ST option)
(PubBarrier mem) => (DMB [0xe] mem)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)

// Optimizations

// Absorb boolean tests into block
(NZ (Equal cc) yes no) => (EQ cc yes no)
(NZ (NotEqual cc) yes no) => (NE cc yes no)
(NZ (LessThan cc) yes no) => (LT cc yes no)
(NZ (LessThanU cc) yes no) => (ULT cc yes no)
(NZ (LessEqual cc) yes no) => (LE cc yes no)
(NZ (LessEqualU cc) yes no) => (ULE cc yes no)
(NZ (GreaterThan cc) yes no) => (GT cc yes no)
(NZ (GreaterThanU cc) yes no) => (UGT cc yes no)
(NZ (GreaterEqual cc) yes no) => (GE cc yes no)
(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
(NZ (LessThanF cc) yes no) => (FLT cc yes no)
(NZ (LessEqualF cc) yes no) => (FLE cc yes no)
(NZ (GreaterThanF cc) yes no) => (FGT cc yes no)
(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)

(TBNZ [0] (Equal cc) yes no) => (EQ cc yes no)
(TBNZ [0] (NotEqual cc) yes no) => (NE cc yes no)
(TBNZ [0] (LessThan cc) yes no) => (LT cc yes no)
(TBNZ [0] (LessThanU cc) yes no) => (ULT cc yes no)
(TBNZ [0] (LessEqual cc) yes no) => (LE cc yes no)
(TBNZ [0] (LessEqualU cc) yes no) => (ULE cc yes no)
(TBNZ [0] (GreaterThan cc) yes no) => (GT cc yes no)
(TBNZ [0] (GreaterThanU cc) yes no) => (UGT cc yes no)
(TBNZ [0] (GreaterEqual cc) yes no) => (GE cc yes no)
(TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no)
(TBNZ [0] (LessThanF cc) yes no) => (FLT cc yes no)
(TBNZ [0] (LessEqualF cc) yes no) => (FLE cc yes no)
(TBNZ [0] (GreaterThanF cc) yes no) => (FGT cc yes no)
(TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no)

(TB(Z|NZ) [0] (XORconst [1] x) yes no) => (TB(NZ|Z) [0] x yes no)

((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst [c] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTWconst [int32(c)] y) yes no)

// For conditional instructions such as CSET, CSEL.
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] z:(AND x y))) && z.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TST x y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTWconst [int32(c)] y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] z:(AND x y))) && z.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTW x y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTconst [c] y))

((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNconst [c] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNWconst [int32(c)] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW x y) yes no)

// CMP(x,-y) -> CMN(x,y) is only valid for EQ/NE ("unordered") comparisons,
// which depend only on the Z flag, because y could be -1<<63, for which negation overflows.
((EQ|NE) (CMP x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMN x y) yes no)
((Equal|NotEqual) (CMP x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMN x y))

// CMPW(x,-y) -> CMNW(x,y) is likewise only valid for EQ/NE comparisons, because y could be -1<<31.
((EQ|NE) (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMNW x y) yes no)
((Equal|NotEqual) (CMPW x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMNW x y))

// For conditional instructions such as CSET, CSEL.
// TODO: add support for LE, GT; overflow needs to be considered.
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNconst [c] y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNWconst [int32(c)] y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN x y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW x y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MADD a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN a (MUL <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MSUB a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMP a (MUL <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MADDW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW a (MULW <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MSUBW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMPW a (MULW <x.Type> x y)))

((CMPconst|CMNconst) [c] y) && c < 0 && c != -1<<63 => ((CMNconst|CMPconst) [-c] y)
((CMPWconst|CMNWconst) [c] y) && c < 0 && c != -1<<31 => ((CMNWconst|CMPWconst) [-c] y)
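// For example, CMPconst [-5] y generates the same flags as CMNconst [5] y;
// c == -1<<63 is excluded because -c would overflow.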

((EQ|NE) (CMPconst [0] x) yes no) => ((Z|NZ) x yes no)
((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no)

((ULE|UGT) (CMPconst [0] x)) => ((EQ|NE) (CMPconst [0] x))
((ULE|UGT) (CMPWconst [0] x)) => ((EQ|NE) (CMPWconst [0] x))

((Z|NZ) sub:(SUB x y)) && sub.Uses == 1 => ((EQ|NE) (CMP x y))
((ZW|NZW) sub:(SUB x y)) && sub.Uses == 1 => ((EQ|NE) (CMPW x y))
((Z|NZ) sub:(SUBconst [c] y)) && sub.Uses == 1 => ((EQ|NE) (CMPconst [c] y))
((ZW|NZW) sub:(SUBconst [c] y)) && sub.Uses == 1 => ((EQ|NE) (CMPWconst [int32(c)] y))

((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN a (MUL <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP a (MUL <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMPW a (MULW <x.Type> x y)) yes no)

// Absorb bit-tests into block
(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)

// Test sign-bit for signed comparisons against zero
(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no)
(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no)
(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no)

// fold offset into address
(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
	(MOVDaddr [int32(off1)+off2] {sym} ptr)

// fold address into load/store.
// Do not fold global variable accesses in -dynlink mode, where they will
// be rewritten to use the GOT via REGTMP, which currently cannot handle
// large offsets.
(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBload [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHload [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWload [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDload [off1+int32(off2)] {sym} ptr mem)
(LDP [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(LDP [off1+int32(off2)] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSload [off1+int32(off2)] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDload [off1+int32(off2)] {sym} ptr mem)

// register indexed load
(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)

(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)

// shifted register indexed load
(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem)
(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr mem)
(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem)

(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem)
(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem)
(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem)
(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem)
(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem)

(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDstore [off1+int32(off2)] {sym} ptr val mem)

// register indexed store
(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
(MOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)

// shifted register indexed store
(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)

(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem)
(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem)

(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

// replace a load from the same location as a preceding store with a zero/sign extension (or a copy in the full-width case)
// These rules seem to interact badly with other rules, resulting in slower code, so they are disabled for now.
//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
//(LDP [off] {sym} ptr (STP [off2] {sym2} ptr2 x y _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x y

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVWreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem)
(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVDnop doesn't emit an instruction; it exists only to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// fold constant into arithmetic ops
(ADD x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
(SUB x (MOVDconst [c])) => (SUBconst [c] x)
(AND x (MOVDconst [c])) => (ANDconst [c] x)
(OR x (MOVDconst [c])) => (ORconst [c] x)
(XOR x (MOVDconst [c])) => (XORconst [c] x)
(TST x (MOVDconst [c])) => (TSTconst [c] x)
(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
(CMN x (MOVDconst [c])) => (CMNconst [c] x)
(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
(BIC x (MOVDconst [c])) => (ANDconst [^c] x)
(EON x (MOVDconst [c])) => (XORconst [^c] x)
(ORN x (MOVDconst [c])) => (ORconst [^c] x)

(SLL x (MOVDconst [c])) => (SLLconst x [c&63])
(SRL x (MOVDconst [c])) => (SRLconst x [c&63])
(SRA x (MOVDconst [c])) => (SRAconst x [c&63])
(SLL x (ANDconst [63] y)) => (SLL x y)
(SRL x (ANDconst [63] y)) => (SRL x y)
(SRA x (ANDconst [63] y)) => (SRA x y)

(CMP x (MOVDconst [c])) => (CMPconst [c] x)
(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))

(ROR x (MOVDconst [c])) => (RORconst x [c&63])
(RORW x (MOVDconst [c])) => (RORWconst x [c&31])

(ADDSflags x (MOVDconst [c])) => (ADDSconstflags [c] x)

(ADDconst [c] y) && c < 0 => (SUBconst [-c] y)

// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))

// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)
(NEG (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
(MUL (NEG x) y) => (MNEG x y)
(MULW (NEG x) y) => (MNEGW x y)

// madd/msub
(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)

(ADD a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
(SUB a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
(ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
(SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)

// madd/msub can't take constant arguments, so do a bit of reordering if a non-constant is available.
// Note: don't reorder arithmetic concerning pointers, as we must ensure that
// no intermediate computations are invalid pointers.
(ADD <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (ADD <v.Type> a m))
(ADD <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (ADD <v.Type> a m))
(SUB <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (SUB <v.Type> a m))
(SUB <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (SUB <v.Type> a m))
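
// The reordering above is plain two's-complement associativity, which exposes
// (a +/- m) to the madd/msub rules. A minimal Go sketch of the four identities
// these rules rely on (illustrative, not compiler code):
//
//	func reassoc(a, c, m int64) bool {
//		return a+(m+c) == (a+m)+c && // ADD of ADDconst
//			a+(m-c) == (a+m)-c && // ADD of SUBconst
//			a-(m+c) == (a-m)-c && // SUB of ADDconst
//			a-(m-c) == (a-m)+c // SUB of SUBconst
//	}
//
// All four hold for wrapping int64 arithmetic; the pointer-shaped case is
// excluded because an intermediate sum could be an invalid pointer, and the
// Uses==1 conditions keep the rewrite from duplicating work.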

// optimize ADCSflags, SBCSflags and friends
(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)

// mul by constant
(MUL _ (MOVDconst [0])) => (MOVDconst [0])
(MUL x (MOVDconst [1])) => x

(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)

(MUL x (MOVDconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
(MULW x (MOVDconst [c])) && v.Type.Size() <= 4 && canMulStrengthReduce32(config, int32(c)) => {mulStrengthReduce32(v, x, int32(c))}

// mneg by constant
(MNEG x (MOVDconst [-1])) => x
(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
(MNEG x (MOVDconst [1])) => (NEG x)
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
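
// Each rule above encodes an identity on MNEG's result -(x*c). A hedged Go
// check of two representative cases (c == 8 == 2^3 and c == 12 == 3<<2;
// function name is ours, for illustration only):
//
//	func checkMNEGIdentities(x int64) bool {
//		ok8 := -(x * 8) == -(x << 3) // isPowerOfTwo(c)
//		ok12 := -(x * 12) == (x-(x<<2))<<2 // c%3 == 0, c/3 == 2^2: x - x<<2 is -3x
//		return ok8 && ok12
//	}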

(MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))

(MADD a x (MOVDconst [-1])) => (SUB a x)
(MADD a _ (MOVDconst [0])) => a
(MADD a x (MOVDconst [1])) => (ADD a x)
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])

(MADD a (MOVDconst [-1]) x) => (SUB a x)
(MADD a (MOVDconst [0]) _) => a
(MADD a (MOVDconst [1]) x) => (ADD a x)
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
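
// MADD computes a + x*y, so with a constant multiplier these are additive
// strength reduction. Two worked cases in Go (sketch only, names are ours):
//
//	func checkMADDIdentities(a, x int64) bool {
//		ok5 := a+5*x == a+(x+x<<2) // isPowerOfTwo(c-1), c == 5
//		ok6 := a+6*x == a-(x-x<<2)<<1 // c%3 == 0, c/3 == 2: x - x<<2 is -3x
//		return ok5 && ok6
//	}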

(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
(MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
(MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
(MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

(MSUB a x (MOVDconst [-1])) => (ADD a x)
(MSUB a _ (MOVDconst [0])) => a
(MSUB a x (MOVDconst [1])) => (SUB a x)
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])

(MSUB a (MOVDconst [-1]) x) => (ADD a x)
(MSUB a (MOVDconst [0]) _) => a
(MSUB a (MOVDconst [1]) x) => (SUB a x)
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])

(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

// div by constant
(UDIV x (MOVDconst [1])) => x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log64(c)] x)
(UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
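
// Unsigned division and modulus by a power of two are a shift and a mask:
// x/2^n == x>>n and x%2^n == x&(2^n-1). A one-line Go check (illustrative):
//
//	func checkUDivPow2(x uint64) bool {
//		return x/8 == x>>3 && x%8 == x&7
//	}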

// generic simplifications
(ADD x (NEG y)) => (SUB x y)
(SUB x (NEG y)) => (ADD x y)
(SUB x x) => (MOVDconst [0])
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVDconst [0])
(BIC x x) => (MOVDconst [0])
(EON x x) => (MOVDconst [-1])
(ORN x x) => (MOVDconst [-1])
(AND x (MVN y)) => (BIC x y)
(XOR x (MVN y)) => (EON x y)
(OR x (MVN y)) => (ORN x y)
(MVN (XOR x y)) => (EON x y)
(NEG (SUB x y)) => (SUB y x)
(NEG (NEG x)) => x

(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
(CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag)
(CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag)
(CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag)
(CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag)
(CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag)
(CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag)
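
// CSINC, CSINV and CSNEG select x when the condition holds and otherwise
// a+1, ^a or -a, so a CSEL whose other arm already has one of those shapes
// folds into a single instruction. In Go terms, the source shape these rules
// target (illustrative only, not a guaranteed lowering):
//
//	func selInc(cond bool, x, a int64) int64 {
//		if cond {
//			return x
//		}
//		return a + 1 // the (CSEL ... (ADDconst [1] a)) pattern above
//	}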

(SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
(SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))

// remove redundant *const ops
(ADDconst [0] x) => x
(SUBconst [0] x) => x
(ANDconst [0] _) => (MOVDconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVDconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (MVN x)

// generic constant folding
(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
(MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
(MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
(MVN (MOVDconst [c])) => (MOVDconst [^c])
(NEG (MOVDconst [c])) => (MOVDconst [-c])
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVDreg (MOVDconst [c])) => (MOVDconst [c])

// constant comparisons
(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])

// other known comparisons
(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
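
// The reasoning behind these: a zero-extended byte/halfword/word is at most
// 0xff/0xffff/0xffffffff, an AND by mask m is at most m, and a logical right
// shift by c is below 1<<(64-c); comparing any of them against a larger
// constant therefore has a statically known result, the flags of 0-1
// ("less than").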

// absorb flag constants into branches
(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)

(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)

(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)

(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)

(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)

(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)

(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)

(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)

(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)

(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)

(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)

(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)

(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)

(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)

(Z (MOVDconst [0]) yes no) => (First yes no)
(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
(NZ (MOVDconst [0]) yes no) => (First no yes)
(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)

// absorb InvertFlags into conditional instructions
(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp)
(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp)
(CSETM [cc] (InvertFlags cmp)) => (CSETM [arm64Invert(cc)] cmp)
(CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp)
(CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp)
(CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp)

// absorb flag constants into boolean values
(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
(LessThanNoov (FlagConstant [fc])) => (MOVDconst [b2i(fc.ltNoov())])
(GreaterEqualNoov (FlagConstant [fc])) => (MOVDconst [b2i(fc.geNoov())])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) => (Equal x)
(NotEqual (InvertFlags x)) => (NotEqual x)
(LessThan (InvertFlags x)) => (GreaterThan x)
(LessThanU (InvertFlags x)) => (GreaterThanU x)
(GreaterThan (InvertFlags x)) => (LessThan x)
(GreaterThanU (InvertFlags x)) => (LessThanU x)
(LessEqual (InvertFlags x)) => (GreaterEqual x)
(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
(GreaterEqual (InvertFlags x)) => (LessEqual x)
(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
(LessThanF (InvertFlags x)) => (GreaterThanF x)
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
(GreaterThanF (InvertFlags x)) => (LessThanF x)
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
(LessThanNoov (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
(GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)

// Don't bother extending if we're not using the higher bits.
(MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
(MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
(MOV(W|WU)reg x) && v.Type.Size() <= 4 => x

// omit sign extension
(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])

// absorb flag constants into conditional instructions
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
(CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y)
(CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y)
(CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y)
(CSETM [cc] flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1])
(CSETM [cc] flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])

// absorb flags back into boolean CSEL
(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
 (CSEL [boolval.Op] x y flagArg(boolval))
(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
 (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
 (CSEL0 [boolval.Op] x flagArg(boolval))
(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
 (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))

// absorb shifts into ops
(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
(MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
(AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
(OR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO x0 y [c])
(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
(XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
(BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
(ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
(EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
(TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])

// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
(ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
(ORshiftRO (MOVDconst [c]) x [d]) => (ORconst [c] (RORconst <x.Type> x [d]))
(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
(XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
(TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))

// constant folding in *shift ops
(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
(MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
(ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
(ORshiftRO x (MOVDconst [c]) [d]) => (ORconst x [rotateRight64(c, d)])
(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
(XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
(BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
(ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst x [^rotateRight64(c, d)])
(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
(EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
(TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])

// simplification with *shift ops
(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
(ANDshiftRO y:(RORconst x [c]) x [c]) => y
(ORshiftLL y:(SLLconst x [c]) x [c]) => y
(ORshiftRL y:(SRLconst x [c]) x [c]) => y
(ORshiftRA y:(SRAconst x [c]) x [c]) => y
(ORshiftRO y:(RORconst x [c]) x [c]) => y
(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
(XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
(EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])

// rev16w | rev16
// ((x>>8) | (x<<8)) => (REV16W x), where x has type uint16; "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
 && uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
 => (REV16W x)

// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
 && (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
 => (REV16 x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
 && (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
 => (REV16 (ANDconst <x.Type> [0xffffffff] x))
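
// All of the patterns above are byte swaps within 16-bit lanes. For the
// 32-bit masked form, a Go sketch of the identity being matched (function
// name is ours):
//
//	func rev16Lanes32(x uint32) uint32 {
//		return (x&0xff00ff00)>>8 | (x&0x00ff00ff)<<8 // swap bytes in each 16-bit lane
//	}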

// Extract from reg pair
(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)

(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
 => (EXTRWconst [32-c] x2 x)
( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
 => (EXTRWconst [32-c] x2 x)
(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
 => (EXTRWconst [32-c] x2 x)

// Rewrite special pairs of shifts to AND.
// On ARM64 the bitmask can fit into an instruction.
(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
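
// The masks follow from the shifts discarding bits: a left-then-right pair
// keeps the low 64-c bits, a right-then-left pair keeps the high bits. Sketch:
//
//	func checkShiftPairs(x uint64) bool {
//		const c = 8
//		return (x<<c)>>c == x&(1<<(64-c)-1) &&
//			(x>>c)<<c == x&^(1<<c-1)
//	}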
// Special case of setting bits to 1 (e.g. math.Copysign(c, -1)): if the OR mask c1 covers every bit cleared by the AND mask c2, the AND is redundant.
(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)

// If the shift amount is at least the data size (32, 16, or 8), we can optimize to constant 0.
(MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0])
(MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0])
(MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0])

// After zero extension, the upper 64-datasize (32|16|8) bits are zero, so we can optimize to constant 0.
(SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0])
(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])

// Special cases for slice operations
(ADD x0 x1:(ANDshiftRA x2:(SLLconst [sl] y) z [63])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
(ADD x0 x1:(ANDshiftLL x2:(SRAconst [63] z) y [sl])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])

// bitfield ops

// sbfiz
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// int64(x << lc)
(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
// int64(x) << lc
(SLLconst [lc] (MOVWreg x)) => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
(SLLconst [lc] (MOVHreg x)) => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
(SLLconst [lc] (MOVBreg x)) => (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
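
// SBFIZ [lsb, width] sign-extends the low width bits of x and places them at
// bit lsb. A hedged Go model of the semantics, assuming 0 < width and
// lsb+width <= 64 (this describes the instruction, it is not compiler code):
//
//	func sbfiz(x int64, lsb, width uint) int64 {
//		return x << (64 - width) >> (64 - width) << lsb // arithmetic >> on int64
//	}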

// sbfx
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// int64(x) >> rc
(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
// merge sbfx and sign-extension into sbfx
(MOVWreg (SBFX [bfc] x)) && bfc.width() <= 32 => (SBFX [bfc] x)
(MOVHreg (SBFX [bfc] x)) && bfc.width() <= 16 => (SBFX [bfc] x)
(MOVBreg (SBFX [bfc] x)) && bfc.width() <= 8 => (SBFX [bfc] x)

// sbfiz/sbfx combinations: merge shifts into bitfield ops
(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.lsb()
 => (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.lsb()
 && sc < bfc.lsb()+bfc.width()
 => (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
(SBFX [bfc] s:(SLLconst [sc] x))
 && s.Uses == 1
 && sc <= bfc.lsb()
 => (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
(SBFX [bfc] s:(SLLconst [sc] x))
 && s.Uses == 1
 && sc > bfc.lsb()
 => (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)

// ubfiz
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// uint64(x) << lc
(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
// uint64(x << lc)
(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)

// merge ANDconst into ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
 => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
 => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)

// ubfx
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// uint64(x) >> rc
(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
// uint64(x >> rc)
(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
 => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
 => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
// merge ANDconst and ubfx into ubfx
(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
 (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0) =>
 (UBFX [bfc] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.width() <= 32 => (UBFX [bfc] x)
(MOVHUreg (UBFX [bfc] x)) && bfc.width() <= 16 => (UBFX [bfc] x)
(MOVBUreg (UBFX [bfc] x)) && bfc.width() <= 8 => (UBFX [bfc] x)
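
// UBFX [lsb, width] is an unsigned bitfield extract: (x >> lsb) masked to
// width bits. The merges above all follow from this model (Go sketch,
// function name is ours):
//
//	func ubfx(x uint64, lsb, width uint) uint64 {
//		return x >> lsb & (1<<width - 1)
//	}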

// Extracting bits from across a zero-extension boundary.
(UBFX [bfc] e:(MOVWUreg x))
 && e.Uses == 1
 && bfc.lsb() < 32
 => (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVHUreg x))
 && e.Uses == 1
 && bfc.lsb() < 16
 => (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVBUreg x))
 && e.Uses == 1
 && bfc.lsb() < 8
 => (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)

// ubfiz/ubfx combinations: merge shifts into bitfield ops
(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.width()
 => (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.width()+bfc.lsb() < 64
 => (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.width()+bfc.lsb() < 64
 => (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.width()
 => (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
// ((x << c1) >> c2) >> c3
(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.lsb()
 => (ANDconst [1<<uint(bfc.width())-1] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.lsb()
 => (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.lsb()
 && sc < bfc.lsb()+bfc.width()
 => (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
// ((x << c1) << c2) >> c3
(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.lsb()
 => (ANDconst [1<<uint(bfc.width())-1] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.lsb()
 => (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.lsb()
 && sc < bfc.lsb()+bfc.width()
 => (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)

// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
 && ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
 => (BFI [bfc] y x)
(ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
 && xc == ^(yc << s) // opposite masks
 && yc & (yc+1) == 0 // power of 2 minus 1
 && yc > 0 // not 0, not all 64 bits (there are better rewrites in that case)
 && s+log64(yc+1) <= 64 // shifted mask doesn't overflow
 => (BFI [armBFAuxInt(s, log64(yc+1))] x y)
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
 && lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
 => (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
// bfxil
(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.width())-1)
 => (BFXIL [bfc] y x)
(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.width()
 => (BFXIL [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
 => (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
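
// BFI [lsb, width] inserts the low width bits of one operand at bit lsb of
// the other; BFXIL extracts width bits at lsb into the low bits. A Go model
// of BFI (illustrative semantics only, name is ours):
//
//	func bfi(dst, src uint64, lsb, width uint) uint64 {
//		mask := uint64(1)<<width - 1
//		return dst&^(mask<<lsb) | (src&mask)<<lsb
//	}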

// FP simplification
(FNEGS (FMULS x y)) => (FNMULS x y)
(FNEGD (FMULD x y)) => (FNMULD x y)
(FMULS (FNEGS x) y) => (FNMULS x y)
(FMULD (FNEGD x) y) => (FNMULD x y)
(FNEGS (FNMULS x y)) => (FMULS x y)
(FNEGD (FNMULD x y)) => (FMULD x y)
(FNMULS (FNEGS x) y) => (FMULS x y)
(FNMULD (FNEGD x) y) => (FMULD x y)

(FADDS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y)
(FADDD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y)
(FSUBS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y)
(FSUBD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y)
(FSUBS (FMULS x y) a) && a.Block.Func.useFMA(v) => (FNMSUBS a x y)
(FSUBD (FMULD x y) a) && a.Block.Func.useFMA(v) => (FNMSUBD a x y)
(FADDS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y)
(FADDD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y)
(FSUBS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y)
(FSUBD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y)
(FSUBS (FNMULS x y) a) && a.Block.Func.useFMA(v) => (FNMADDS a x y)
(FSUBD (FNMULD x y) a) && a.Block.Func.useFMA(v) => (FNMADDD a x y)

(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int8(read8(sym, int64(off))))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])

// Prefetch instructions (the aux value selects the option: 0 - PLDL1KEEP; 1 - PLDL1STRM)
(PrefetchCache addr mem) => (PRFM [0] addr mem)
(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem)

// Arch-specific inlining for small or disjoint runtime.memmove
(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
 && sz >= 0
 && isSameCall(sym, "runtime.memmove")
 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
 && isInlinableMemmove(dst, src, sz, config)
 && clobber(s1, s2, s3, call)
 => (Move [sz] dst src mem)

// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
 && sz >= 0
 && isSameCall(sym, "runtime.memmove")
 && call.Uses == 1
 && isInlinableMemmove(dst, src, sz, config)
 && clobber(call)
 => (Move [sz] dst src mem)
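
// From the Go side, both forms correspond to a copy whose size reaches
// runtime.memmove as a constant, e.g. copy(dst[:16], src[:16]); whether a
// particular copy takes this path depends on earlier compilation phases.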

((REV|REVW) ((REV|REVW) p)) => p

// internal/runtime/math.MulUintptr intrinsics

(Select0 (Mul64uover x y)) => (MUL x y)
(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
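
// math/bits exposes the same decomposition these two rules use: the low half
// of the double-width product is the result and overflow is "high half
// nonzero". Sketch (assumes import "math/bits"; function name is ours):
//
//	func mulUover(x, y uint64) (uint64, bool) {
//		hi, lo := bits.Mul64(x, y) // hi is what UMULH computes
//		return lo, hi != 0
//	}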
// 32-bit x 32-bit -> 64-bit multiply
(MUL r:(MOVWUreg x) s:(MOVWUreg y)) && r.Uses == 1 && s.Uses == 1 => (UMULL x y)
(MUL r:(MOVWreg x) s:(MOVWreg y)) && r.Uses == 1 && s.Uses == 1 => (MULL x y)