// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
(AddPtr ...) => (ADDQ ...)
(Add(32|64)F ...) => (ADDS(S|D) ...)

(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
(SubPtr ...) => (SUBQ ...)
(Sub(32|64)F ...) => (SUBS(S|D) ...)

(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
(Mul(32|64)F ...) => (MULS(S|D) ...)

(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))

(Hmul(64|32) ...) => (HMUL(Q|L) ...)
(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)

(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
(Div(32|64)F ...) => (DIVS(S|D) ...)

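// 64-bit add/sub with carry: NEGLflags converts the 0/1 carry-in c into the CPU
// carry flag, and the carry-out flag is materialized back into a 0/1 value via
// SBBQcarrymask (0 or -1) followed by NEGQ.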
(Select0 (Add64carry x y c)) =>
	(Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Add64carry x y c)) =>
	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
(Select0 (Sub64borrow x y c)) =>
	(Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
(Select1 (Sub64borrow x y c)) =>
	(NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))

// Optimize ADCQ and friends
(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x
(Mul64uhilo ...) => (MULQU2 ...)
(Div128u ...) => (DIVQU2 ...)

(Avg64u ...) => (AVGQU ...)

(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)

(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))

// Lowering boolean ops
(AndB ...) => (ANDL ...)
(OrB ...) => (ORL ...)
(Not x) => (XORLconst [1] x)

// Lowering pointer arithmetic
(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)

// Lowering other arithmetic
(Ctz64 x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32 x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64 <t> x) && buildcfg.GOAMD64 < 3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
(Ctz32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [1<<16] x))
(Ctz8 x) => (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))

(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz8NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
(Ctz64NonZero x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ x))
(Ctz32NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
(Ctz16NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
(Ctz8NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)

// BitLen64 of a 64-bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
// However, for zero-extended values, we can cheat a bit, and calculate
// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
// places the index of the highest set bit where we want it.
// For GOAMD64>=3, BitLen can be calculated as OperandSize - LZCNT(x).
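// For example, BitLen32(0b1010) on GOAMD64<3 computes BSRQ(0b1010<<1 + 1) = BSRQ(0b10101) = 4.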
(BitLen64 <t> x) && buildcfg.GOAMD64 < 3 => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
(BitLen32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
(BitLen16 x) && buildcfg.GOAMD64 < 3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
(BitLen8 x) && buildcfg.GOAMD64 < 3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
(BitLen64 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
// Use the 64-bit version to allow constant folding to remove unnecessary arithmetic.
(BitLen32 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
(BitLen16 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
(BitLen8 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))

(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
(Bswap16 x) => (ROLWconst [8] x)

(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))

(Sqrt ...) => (SQRTSD ...)
(Sqrt32 ...) => (SQRTSS ...)

(RoundToEven x) => (ROUNDSD [0] x)
(Floor x) => (ROUNDSD [1] x)
(Ceil x) => (ROUNDSD [2] x)
(Trunc x) => (ROUNDSD [3] x)

(FMA x y z) => (VFMADD231SD z x y)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to16 ...) => (MOVBQSX ...)
(SignExt8to32 ...) => (MOVBQSX ...)
(SignExt8to64 ...) => (MOVBQSX ...)
(SignExt16to32 ...) => (MOVWQSX ...)
(SignExt16to64 ...) => (MOVWQSX ...)
(SignExt32to64 ...) => (MOVLQSX ...)

(ZeroExt8to16 ...) => (MOVBQZX ...)
(ZeroExt8to32 ...) => (MOVBQZX ...)
(ZeroExt8to64 ...) => (MOVBQZX ...)
(ZeroExt16to32 ...) => (MOVWQZX ...)
(ZeroExt16to64 ...) => (MOVWQZX ...)
(ZeroExt32to64 ...) => (MOVLQZX ...)

(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])

(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))

// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Lowering float <-> int
(Cvt32to32F ...) => (CVTSL2SS ...)
(Cvt32to64F ...) => (CVTSL2SD ...)
(Cvt64to32F ...) => (CVTSQ2SS ...)
(Cvt64to64F ...) => (CVTSQ2SD ...)

// Float, to int.
// To make AMD64 "overflow" return max positive instead of max negative, compute
// y and not x, smear the sign bit, and xor.
(Cvt32Fto32 <t> x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL <t> y (SARLconst <t> [31] (ANDL <t> y:(CVTTSS2SL <t> x) (NOTL <typ.Int32> (MOVLf2i x)))))
(Cvt64Fto32 <t> x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL <t> y (SARLconst <t> [31] (ANDL <t> y:(CVTTSD2SL <t> x) (NOTL <typ.Int32> (MOVLf2i (CVTSD2SS <typ.Float32> x))))))

(Cvt32Fto64 <t> x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ <t> y (SARQconst <t> [63] (ANDQ <t> y:(CVTTSS2SQ <t> x) (NOTQ <typ.Int64> (MOVQf2i (CVTSS2SD <typ.Float64> x))))))
(Cvt64Fto64 <t> x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ <t> y (SARQconst <t> [63] (ANDQ <t> y:(CVTTSD2SQ <t> x) (NOTQ <typ.Int64> (MOVQf2i x)))))

(Cvt32Fto32 <t> x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL <t> x)
(Cvt32Fto64 <t> x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SQ <t> x)
(Cvt64Fto32 <t> x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL <t> x)
(Cvt64Fto64 <t> x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SQ <t> x)

(Cvt32Fto64F ...) => (CVTSS2SD ...)
(Cvt64Fto32F ...) => (CVTSD2SS ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// Floating-point min is tricky, as the hardware op isn't right for various special
// cases (-0 and NaN). We use two hardware ops organized just right to make the
// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207
// (although that comment isn't exactly right, as the value overwritten is not simulated correctly).
// t1 = MINSD x, y => incorrect if x==NaN or x==-0,y==+0
// t2 = MINSD t1, x => fixes x==NaN case
// res = POR t1, t2 => fixes x==-0,y==+0 case
// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although
// it might not produce the same NaN as the input).
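// Worked cases (MINSD a b returns its second operand b when either input is NaN
// or when both inputs are zeros):
//	x=NaN:        t1 = y (wrong), t2 = MINSD t1 x = NaN, POR gives NaN (right)
//	x=-0, y=+0:   t1 = +0 (wrong), t2 = MINSD t1 x = -0, POR gives -0 (right)
//	ordinary x,y: t1 == t2 == min(x,y), and POR of identical bits is a no-op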
(Min(64|32)F <t> x y) => (POR (MINS(D|S) <t> (MINS(D|S) <t> x y) x) (MINS(D|S) <t> x y))
// Floating-point max is even trickier. Punt to using min instead.
// max(x,y) == -min(-x,-y)
(Max(64|32)F <t> x y) => (Neg(64|32)F <t> (Min(64|32)F <t> (Neg(64|32)F <t> x) (Neg(64|32)F <t> y)))

(CvtBoolToUint8 ...) => (Copy ...)

// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
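// For example, with y >= the operand width the CMPxconst leaves the carry flag
// clear, SBBcarrymask produces 0, and the AND forces the result to 0; for an
// in-range y the mask is all ones and the AND is a no-op.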
(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))

(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)

(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))

(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y)

// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
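// For example, with y >= the operand width the carry mask is 0, its NOT is all
// ones, y|~mask = -1, and the hardware masks that shift amount down to width-1,
// which smears the sign bit as desired.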
(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))

(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)

// Lowering integer comparisons
(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y))
(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y))
(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))

// Lowering floating point comparisons
// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here,
// and the operands are reversed when generating assembly language.
(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y))
(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y))
// Use SETGF/SETGEF with reversed operands to dodge the NaN case.
(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x))
(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x))
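// (UCOMISx sets ZF=PF=CF=1 for unordered operands, so the "above"-style conditions
// tested by SETGF/SETGEF are false whenever either operand is NaN.)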

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)

// Lowering stores
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVSDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVSSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVQstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)

// Lowering moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) => (MOVOstore dst (MOVOload src mem) mem)

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [6] dst src mem) =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [7] dst src mem) =>
	(MOVLstore [3] dst (MOVLload [3] src mem)
		(MOVLstore dst (MOVLload src mem) mem))
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [10] dst src mem) =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [11] dst src mem) =>
	(MOVLstore [7] dst (MOVLload [7] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [12] dst src mem) =>
	(MOVLstore [8] dst (MOVLload [8] src mem)
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem) && s >= 13 && s <= 15 =>
	(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
		(MOVQstore dst (MOVQload src mem) mem))

// Copying up to 192 bytes uses straightline code.
(Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)

// Copying up to ~1KB uses a small loop.
(Move [s] dst src mem) && s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)

// Large copying uses REP MOVSQ.
(Move [s] dst src mem) && s > repMoveThreshold && s%8 != 0 =>
	(Move [s-s%8]
		(OffPtr <dst.Type> dst [s%8])
		(OffPtr <src.Type> src [s%8])
		(MOVQstore dst (MOVQload src mem) mem))
(Move [s] dst src mem) && s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s) =>
	(REPMOVSQ dst src (MOVQconst [s/8]) mem)

// Lowering Zero instructions
(Zero [0] _ mem) => mem
(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)

(Zero [3] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,2)] destptr
		(MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [5] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [6] destptr mem) =>
	(MOVWstoreconst [makeValAndOff(0,4)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [7] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,3)] destptr
		(MOVLstoreconst [makeValAndOff(0,0)] destptr mem))

// Zero small numbers of words directly.
(Zero [9] destptr mem) =>
	(MOVBstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [10] destptr mem) =>
	(MOVWstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [11] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,7)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [12] destptr mem) =>
	(MOVLstoreconst [makeValAndOff(0,8)] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

(Zero [s] destptr mem) && s > 12 && s < 16 =>
	(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
		(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))

// Zeroing up to 192 bytes uses straightline code.
(Zero [s] destptr mem) && s >= 16 && s < 192 => (LoweredZero [s] destptr mem)

// Zeroing up to ~1KB uses a small loop.
(Zero [s] destptr mem) && s >= 192 && s <= repZeroThreshold => (LoweredZeroLoop [s] destptr mem)

// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem) && s > repZeroThreshold && s%8 != 0 =>
	(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
		(MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
(Zero [s] destptr mem) && s > repZeroThreshold && s%8 == 0 =>
	(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)

// Lowering constants
(Const8 [c]) => (MOVLconst [int32(c)])
(Const16 [c]) => (MOVLconst [int32(c)])
(Const32 ...) => (MOVLconst ...)
(Const64 ...) => (MOVQconst ...)
(Const32F ...) => (MOVSSconst ...)
(Const64F ...) => (MOVSDconst ...)
(ConstNil) => (MOVQconst [0])
(ConstBool [c]) => (MOVLconst [b2i32(c)])

// Lowering calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Lowering conditional moves
// If the condition is a SETxx, we can just run a CMOV from the comparison that was
// setting the flags.
// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
	=> (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
	=> (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
	=> (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)

(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
	=> (CMOVQNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
	=> (CMOVLNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
	=> (CMOVWNE y x (CMPQconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && (is64BitInt(t) || isPtr(t))
	=> (CMOVQNE y x (CMPLconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && is32BitInt(t)
	=> (CMOVLNE y x (CMPLconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 && is16BitInt(t)
	=> (CMOVWNE y x (CMPLconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && (is64BitInt(t) || isPtr(t))
	=> (CMOVQNE y x (CMPWconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && is32BitInt(t)
	=> (CMOVLNE y x (CMPWconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 && is16BitInt(t)
	=> (CMOVWNE y x (CMPWconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && (is64BitInt(t) || isPtr(t))
	=> (CMOVQNE y x (CMPBconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && is32BitInt(t)
	=> (CMOVLNE y x (CMPBconst [0] check))
(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 && is16BitInt(t)
	=> (CMOVWNE y x (CMPBconst [0] check))

// Absorb InvertFlags
(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
	=> (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
	=> (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
	=> (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)

// Absorb constants generated during lower
(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y

// Miscellaneous
(IsNonNil p) => (SETNE (TESTQ p p))
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in the old ABI; in the new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)

(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
(Addr {sym} base) => (LEAQ {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (LEAQ {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (LEAQ {sym} base)

(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)

// block rewrites
(If (SETL cmp) yes no) => (LT cmp yes no)
(If (SETLE cmp) yes no) => (LE cmp yes no)
(If (SETG cmp) yes no) => (GT cmp yes no)
(If (SETGE cmp) yes no) => (GE cmp yes no)
(If (SETEQ cmp) yes no) => (EQ cmp yes no)
(If (SETNE cmp) yes no) => (NE cmp yes no)
(If (SETB cmp) yes no) => (ULT cmp yes no)
(If (SETBE cmp) yes no) => (ULE cmp yes no)
(If (SETA cmp) yes no) => (UGT cmp yes no)
(If (SETAE cmp) yes no) => (UGE cmp yes no)
(If (SETO cmp) yes no) => (OS cmp yes no)

// Special case for floating point - LF/LEF not generated
(If (SETGF cmp) yes no) => (UGT cmp yes no)
(If (SETGEF cmp) yes no) => (UGE cmp yes no)
(If (SETEQF cmp) yes no) => (EQF cmp yes no)
(If (SETNEF cmp) yes no) => (NEF cmp yes no)

(If cond yes no) => (NE (TESTB cond cond) yes no)

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)

// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
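// (XCHG with a memory operand carries an implicit LOCK prefix, which is what makes
// these stores sequentially consistent.)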
(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))

// Atomic exchanges.
(AtomicExchange8 ptr val mem) => (XCHGB val ptr mem)
(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)

// Atomic adds.
(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)

// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)

// Atomic memory logical operations (old style).
(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)

// Atomic memory logical operations (new style).
(Atomic(And64|And32|Or64|Or32)value ptr val mem) => (LoweredAtomic(And64|And32|Or64|Or32) ptr val mem)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVQconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVQconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVQconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVQconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)

// lowering rotates
(RotateLeft8 ...) => (ROLB ...)
(RotateLeft16 ...) => (ROLW ...)
(RotateLeft32 ...) => (ROLL ...)
(RotateLeft64 ...) => (ROLQ ...)

// ***************************
// Above: lowering rules
// Below: optimizations
// ***************************
// TODO: Should the optimizations be a separate pass?

// Fold boolean tests into blocks
(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)

// Unsigned comparisons to 0/1
(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])

// x & 1 != 0 -> x & 1
(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
// x & 1 == 0 -> (x & 1) ^ 1
(SETAE (BT(L|Q)const [0] x)) => (XORLconst [1] (ANDLconst <typ.Bool> [1] x))

// Shorten compare by rewriting x < 128 as x <= 127, which can be encoded in a single-byte immediate on x86.
(SETL c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETLE (CMP(Q|L)const [127] x))
(SETB c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETBE (CMP(Q|L)const [127] x))

// x >= 128 -> x > 127
(SETGE c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETG (CMP(Q|L)const [127] x))
(SETAE c:(CMP(Q|L)const [128] x)) && c.Uses == 1 => (SETA (CMP(Q|L)const [127] x))

(CMOVQLT x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVQLE x y (CMP(Q|L)const [127] z))
(CMOVLLT x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVLLE x y (CMP(Q|L)const [127] z))
(LT c:(CMP(Q|L)const [128] z) yes no) && c.Uses == 1 => (LE (CMP(Q|L)const [127] z) yes no)
(CMOVQGE x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVQGT x y (CMP(Q|L)const [127] z))
(CMOVLGE x y c:(CMP(Q|L)const [128] z)) && c.Uses == 1 => (CMOVLGT x y (CMP(Q|L)const [127] z))
(GE c:(CMP(Q|L)const [128] z) yes no) && c.Uses == 1 => (GT (CMP(Q|L)const [127] z) yes no)

// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
// Note that BTx instructions use the carry bit, so we need to convert tests for zero flag
// into tests for carry flags.
// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
// mutandis, for UGE and SETAE, and CC and SETCC.
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
((NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
	=> ((ULT|UGE) (BTLconst [int8(log32u(uint32(c)))] x))
((NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
	=> ((ULT|UGE) (BTQconst [int8(log32u(uint32(c)))] x))
((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
	=> ((ULT|UGE) (BTQconst [int8(log64u(uint64(c)))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
(SET(NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
	=> (SET(B|AE) (BTLconst [int8(log32u(uint32(c)))] x))
(SET(NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
	=> (SET(B|AE) (BTQconst [int8(log32u(uint32(c)))] x))
(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
	=> (SET(B|AE) (BTQconst [int8(log64u(uint64(c)))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	=> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	=> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUnsignedPowerOfTwo(uint32(c))
	=> (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUnsignedPowerOfTwo(uint64(c))
	=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUnsignedPowerOfTwo(uint64(c))
	=> (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)

// Handle bit-testing in the form (a>>b)&1 != 0 by building on the above rules
// and further combining shifts.
(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
(BT(Q|L)const [c] (ADDQ x x)) && c>1 => (BT(Q|L)const [c-1] x)
(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x)
(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
(BTLconst [c] (ADDL x x)) && c>1 => (BTLconst [c-1] x)
(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x)
(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x)

// Rewrite a & 1 != 1 into a & 1 == 0.
// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)

// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
// the constant field of the OR/XOR instruction. See issue 61694.
((OR|XOR)Q (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64u(uint64(c)))] x)
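// For example, x | (1<<33): the mask doesn't fit in a sign-extended 32-bit
// immediate, so (BTSQconst [33] x) avoids materializing the constant.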

// Recognize bit clearing: a &^= 1<<b
(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
// Note: only convert AND to BTR if the constant wouldn't fit in
// the constant field of the AND instruction. See issue 61694.
(ANDQ (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64u(uint64(^c)))] x)

// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
// for instance:
//	x & 0xFFFF0000 -> (x >> 16) << 16
//	x & 0x80000000 -> (x >> 31) << 31
//
// In case the mask is just one bit (like second example above), it conflicts
// with the above rules to detect bit-testing / bit-clearing of first/last bit.
// We thus special-case them, by detecting the shift patterns.

// Special case resetting first/last bit
(ADD(L|Q) (SHR(L|Q)const [1] x) (SHR(L|Q)const [1] x))
	=> (AND(L|Q)const [-2] x)
(SHRLconst [1] (ADDL x x))
	=> (ANDLconst [0x7fffffff] x)
(SHRQconst [1] (ADDQ x x))
	=> (BTRQconst [63] x)

// Special case testing first/last bit (with double-shift generated by generic.rules)
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
	=> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
	=> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
	=> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
	=> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)

((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
	=> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
	=> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
	=> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
	=> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)

// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
	=> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
	=> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
	=> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
	=> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)

// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
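// e.g. math.Copysign(c, -1) clears and then sets bit 63, producing
// (BTSQconst [63] (BTRQconst [63] x)), where the inner BTR is redundant.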
(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x)
(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x)
(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x)
(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x)

// Fold boolean negation into SETcc.
(XORLconst [1] (SETNE x)) => (SETEQ x)
(XORLconst [1] (SETEQ x)) => (SETNE x)
(XORLconst [1] (SETL x)) => (SETGE x)
(XORLconst [1] (SETGE x)) => (SETL x)
(XORLconst [1] (SETLE x)) => (SETG x)
(XORLconst [1] (SETG x)) => (SETLE x)
(XORLconst [1] (SETB x)) => (SETAE x)
(XORLconst [1] (SETAE x)) => (SETB x)
(XORLconst [1] (SETBE x)) => (SETA x)
(XORLconst [1] (SETA x)) => (SETBE x)

// Special case for floating point - LF/LEF not generated
(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)

// Disabled because it interferes with the pattern match above and makes worse code.
// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))

// fold constants into instructions
(ADDQ x (MOVQconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x)
(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)

(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))

(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
(MULL x (MOVLconst [c])) => (MULLconst [c] x)

(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)

(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x)

(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)

(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
(ORQ x (MOVLconst [c])) => (ORQconst [c] x)
(ORL x (MOVLconst [c])) => (ORLconst [c] x)

(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
(XORL x (MOVLconst [c])) => (XORLconst [c] x)

(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)

(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])

(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)

// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
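// For example, (SHLQ x (ADDQconst [64] y)) can drop the add entirely, since the
// hardware only consumes y&63.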
((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))

((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))

((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))

((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))

// rotate left negative = rotate right
(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
(ROLL x (NEG(Q|L) y)) => (RORL x y)
(ROLW x (NEG(Q|L) y)) => (RORW x y)
(ROLB x (NEG(Q|L) y)) => (RORB x y)

// rotate right negative = rotate left
(RORQ x (NEG(Q|L) y)) => (ROLQ x y)
(RORL x (NEG(Q|L) y)) => (ROLL x y)
(RORW x (NEG(Q|L) y)) => (ROLW x y)
(RORB x (NEG(Q|L) y)) => (ROLB x y)

// rotate by constants
(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)

(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)

// Constant shift simplifications
((SHLQ|SHRQ|SARQ)const x [0]) => x
((SHLL|SHRL|SARL)const x [0]) => x
((SHRW|SARW)const x [0]) => x
((SHRB|SARB)const x [0]) => x
((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x

// Multi-register shifts
(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
(ORQ (SH(R|L)XQ lo bits) (SH(L|R)XQ hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)

// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
// for the small shifts. I don't think we'll ever generate a weird shift
// (e.g. (SHRW x (MOVLconst [24]))), but just in case.

(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))

// Canonicalize the order of arguments to comparisons - helps with CSE.
(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))

// Using MOVZX instead of AND is cheaper.
(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)
(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
// It also can't trigger: the is32Bit guard on the ANDQconst lowering rule above
// prevents 0xFFFFFFFF from matching (for the same reason).
// Using an alternate form of this rule segfaults some binaries because of
// adverse interactions with other passes.
// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)

// strength reduction
(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
(MUL(Q|L)const [ 1] x) => x
(MULQconst [c] x) && canMulStrengthReduce(config, int64(c)) => {mulStrengthReduce(v, x, int64(c))}
(MULLconst [c] x) && v.Type.Size() <= 4 && canMulStrengthReduce32(config, c) => {mulStrengthReduce32(v, x, c)}
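// (mulStrengthReduce expands the multiply into shifts, adds, and LEAs; e.g. x*5
// typically becomes a single LEAQ4 x x.)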
866
867// Prefer addition when shifting left by one
868(SHL(Q|L)const [1] x) => (ADD(Q|L) x x)
869
870// combine add/shift into LEAQ/LEAL
871(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
872(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
873(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y)
874(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x)
875
876// combine ADDQ/ADDQconst into LEAQ1/LEAL1
877(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
878(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
879(ADD(Q|L)const [c] (ADD(Q|L) x x)) => (LEA(Q|L)1 [c] x x)
880
881// fold ADDQ/ADDL into LEAQ/LEAL
882(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
883(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
884(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
885(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
886
887// fold ADDQconst/ADDLconst into LEAQx/LEALx
888(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
889(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
890(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
891(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
892(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
893(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
894(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
895(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
896(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
897(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
898(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)
899
900// fold shifts into LEAQx/LEALx
901(LEA(Q|L)1 [c] {s} x z:(ADD(Q|L) y y)) && x != z => (LEA(Q|L)2 [c] {s} x y)
902(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
903(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
904(LEA(Q|L)2 [c] {s} x z:(ADD(Q|L) y y)) && x != z => (LEA(Q|L)4 [c] {s} x y)
905(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
906(LEA(Q|L)4 [c] {s} x z:(ADD(Q|L) y y)) && x != z => (LEA(Q|L)8 [c] {s} x y)
907
908// (x + x) << 1 -> x << 2
909(LEA(Q|L)2 [0] {s} (ADD(Q|L) x x) x) && s == nil => (SHL(Q|L)const [2] x)
910
911// (x + x) << 2 -> x << 3 and similar
912(SHL(Q|L)const [c] (ADD(Q|L) x x)) => (SHL(Q|L)const [c+1] x)
913
914// reverse ordering of compare instruction
915(SETL (InvertFlags x)) => (SETG x)
916(SETG (InvertFlags x)) => (SETL x)
917(SETB (InvertFlags x)) => (SETA x)
918(SETA (InvertFlags x)) => (SETB x)
919(SETLE (InvertFlags x)) => (SETGE x)
920(SETGE (InvertFlags x)) => (SETLE x)
921(SETBE (InvertFlags x)) => (SETAE x)
922(SETAE (InvertFlags x)) => (SETBE x)
923(SETEQ (InvertFlags x)) => (SETEQ x)
924(SETNE (InvertFlags x)) => (SETNE x)
925
926(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
927(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
928(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
929(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
930(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
931(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
932(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
933(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
934(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
935(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)
936
937// sign extended loads
938// Note: The combined instruction must end up in the same block
939// as the original load. If not, we end up making a value with
940// memory type live in two different blocks, which can lead to
941// multiple memory values alive simultaneously.
942// Make sure we don't combine these ops if the load has another use.
943// This prevents a single load from being split into multiple loads
944// which then might return different values. See test/atomicload.go.
945(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
946(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
947(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
948(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
949(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
950(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
951(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
952(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
953(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
954(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
955(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
956(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
957(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
958(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
959(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
960(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
961(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
962(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
963
964// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
965(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
966(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
967(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
968(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
969(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
970(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
971(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
972
973// Fold extensions and ANDs together.
974(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
975(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
976(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
977(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
978(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
979(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
980
981// Don't extend before storing
982(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
983(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
984(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
985(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
986(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
987(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
988
989// fold constants into memory operations
990// Note that this is not always a good idea because if not all the uses of
991// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
992// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
993// Nevertheless, let's do it!
(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
	(MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
	(CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)

((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
	((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
	((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)

// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
	(MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
	(MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
	(MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
	(MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)

// Fold address offsets into constant stores.
(MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)

// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
// what variables are being read/written by the ops.
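// For example, a symbol-less (MOVQload [off1] ...) from (LEAQ [off2] {sym} base)
// becomes (MOVQload [off1+off2] {sym} base ...), keeping the symbol visible to
// liveness (illustrative; canMergeSym holds when at most one symbol is non-nil).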
(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
	(MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
	(CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)

((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
	((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	&& ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
	((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	&& is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)

// fold LEAQs together
(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)

// LEAQ into LEAQ1
(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ1 into LEAQ
(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ into LEAQ[248]
(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
	(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
	(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ[248] into LEAQ
(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)

// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
	(LEAQ4 [off1+2*off2] {sym1} x y)
(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
	(LEAQ8 [off1+4*off2] {sym1} x y)
// TODO: more?

// Lower LEAQ2/4/8 when the index is a constant
(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
	(LEAQ [off+int32(scale)*2] {sym} x)
(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
	(LEAQ [off+int32(scale)*4] {sym} x)
(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
	(LEAQ [off+int32(scale)*8] {sym} x)

// Absorb InvertFlags into branches.
(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) => (NE cmp yes no)

// Constant comparisons.
(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)

// CMPQconst requires a 32-bit const, but we can still constant-fold 64-bit consts.
// In theory this applies to any of the simplifications above,
// but CMPQ is the only one I've actually seen occur.
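// For example, (CMPQ (MOVQconst [1<<33]) (MOVQconst [0])) folds to
// (FlagGT_UGT): signed greater and unsigned greater (illustrative).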
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)

// Other known comparisons.
(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)

// TESTQ c c sets flags like CMPQ c 0.
(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT)
(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT)
(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT)
(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT)

// TODO: DIVxU also.

// Absorb flag constants into SBB ops.
(SBBQcarrymask (FlagEQ)) => (MOVQconst [0])
(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])

// Absorb flag constants into branches.
((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no)
((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes)
((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)

// Absorb flag constants into SETxx ops.
((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1])
((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0])
((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1])
((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0])
((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1])
((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0])
((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1])
((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0])
((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1])
((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0])

(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)

(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)

(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)

(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)

(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)

(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)

(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)

(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)

(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)

(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)

// Remove redundant *const ops
(ADDQconst [0] x) => x
(ADDLconst [c] x) && c==0 => x
(SUBQconst [0] x) => x
(SUBLconst [c] x) && c==0 => x
(ANDQconst [0] _) => (MOVQconst [0])
(ANDLconst [c] _) && c==0 => (MOVLconst [0])
(ANDQconst [-1] x) => x
(ANDLconst [c] x) && c==-1 => x
(ORQconst [0] x) => x
(ORLconst [c] x) && c==0 => x
(ORQconst [-1] _) => (MOVQconst [-1])
(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
(XORQconst [0] x) => x
(XORLconst [c] x) && c==0 => x
// TODO: since we got rid of the W/B versions, we might miss
// things like (ANDLconst [0x100] x) which were formerly
// (ANDBconst [0] x). Probably doesn't happen very often.
// If we cared, we might do:
// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 -> (MOVLconst [0])

// Remove redundant ops
// Not in generic rules, because they may appear after lowering, e.g. Slicemask
(NEG(Q|L) (NEG(Q|L) x)) => x
(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
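// e.g. -(x-y) == y-x, so the outer NEG can be absorbed into the SUB when the
// SUB has no other uses.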

// Convert constant subtracts to constant adds.
// For SUBQconst, the c != -(1<<31) guard keeps -c representable as an int32.
(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
(SUBLconst [c] x) => (ADDLconst [-c] x)

// generic constant folding
// TODO: more of this
(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
(NEGQ (MOVQconst [c])) => (MOVQconst [-c])
(NEGL (MOVLconst [c])) => (MOVLconst [-c])
(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
(NOTL (MOVLconst [c])) => (MOVLconst [^c])
(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])

// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
// but we can still constant-fold.
// In theory this applies to any of the simplifications above,
// but ORQ is the only one I've actually seen occur.
(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])

// generic simplifications
// TODO: more of this
(ADDQ x (NEGQ y)) => (SUBQ x y)
(ADDL x (NEGL y)) => (SUBL x y)
(SUBQ x x) => (MOVQconst [0])
(SUBL x x) => (MOVLconst [0])
(ANDQ x x) => x
(ANDL x x) => x
(ORQ x x) => x
(ORL x x) => x
(XORQ x x) => (MOVQconst [0])
(XORL x x) => (MOVLconst [0])

(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])

// Fold NEG into ADDconst/MULconst. Take care to keep c in 32-bit range.
(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
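// e.g. -(c + -x) == -c + x and c * -x == -c * x; the c != -(1<<31) guards keep
// -c representable as an int32.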

// checking AND against 0.
(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)

// Convert TESTx to TESTxconst if possible.
(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)

// TEST %reg,%reg is shorter than CMP
(CMPQconst x [0]) => (TESTQ x x)
(CMPLconst x [0]) => (TESTL x x)
(CMPWconst x [0]) => (TESTW x x)
(CMPBconst x [0]) => (TESTB x x)
(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)

// Convert LEAQ1 back to ADDQ if we can
(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)

(MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
	&& x.Uses == 1
	&& sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
	&& a.Val() == 0
	&& c.Val() == 0
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
(MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
	&& x.Uses == 1
	&& sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
	&& a.Val() == 0
	&& c.Val() == 0
	&& setPos(v, x.Pos)
	&& clobber(x)
	=> (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)

// Merge load and op
// TODO: add indexed variants?
((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
	((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
	((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) =>
	(BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)

// Merge ADDQconst and LEAQ into atomic loads.
(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
	(MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)

// Merge ADDQconst and LEAQ into atomic stores.
(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
	(XCHGQ [off1+off2] {sym} val ptr mem)
(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
	(XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
	(XCHGL [off1+off2] {sym} val ptr mem)
(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
	(XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)

// Merge ADDQconst into atomic adds.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
	(XADDQlock [off1+off2] {sym} val ptr mem)
(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
	(XADDLlock [off1+off2] {sym} val ptr mem)

// Merge ADDQconst into atomic compare and swaps.
// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
	(CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
	(CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)

// We don't need the conditional move if we know the arg of BSF/BSR is not zero.
(CMOVQEQ x _ (Select1 (BS(F|R)Q (ORQconst [c] _)))) && c != 0 => x
// Extension is unnecessary for trailing zeros.
(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))

// Redundant sign/zero extensions
// Note: see issue 21963. We have to make sure we use the right type on
// the resulting extension (the outer type, not the inner type).
(MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
(MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
(MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
(MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
(MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
(MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
(MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
(MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
(MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
(MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
(MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)

(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
	((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
	&& isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
	((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)

// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val)
(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val)

// Other load-like ops.
(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))

(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))

// Detect FMA
(ADDS(S|D) (MULS(S|D) x y) z) && buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v) => (VFMADD231S(S|D) z x y)
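// A hedged Go-level sketch (illustrative): with GOAMD64 >= 3, when the FMA
// heuristics allow fusing,
//
//	func f(x, y, z float64) float64 { return x*y + z }
//
// can compile to a single VFMADD231SD instead of MULSD followed by ADDSD.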

// Redirect stores to use the other register set.
(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem)
(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem)

// Note: f == f is false exactly when f is NaN, so NaN constants are left alone here.
(MOVSDstore [off] {sym} ptr (MOVSDconst [f]) mem) && f == f => (MOVQstore [off] {sym} ptr (MOVQconst [int64(math.Float64bits(f))]) mem)
(MOVSSstore [off] {sym} ptr (MOVSSconst [f]) mem) && f == f => (MOVLstore [off] {sym} ptr (MOVLconst [int32(math.Float32bits(f))]) mem)

// Load args directly into the register class where they will be used.
// We do this by just modifying the type of the Arg.
(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})

// LEAQ is rematerializeable, so this helps to avoid register spill.
// See issue 22947 for details.
(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)

// HMULx is commutative, but its first argument must go in AX.
// If possible, put a rematerializeable value in the first argument slot,
// to reduce the odds that another value will have to be spilled
// specifically to free up AX.
(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x)
(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)

// Fold loads into compares
// Note: these may be undone by the flagalloc pass.
(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))

(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
	&& l.Uses == 1
	&& clobber(l) =>
@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
	&& l.Uses == 1
	&& clobber(l) =>
@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)

(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)

(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
	&& l == l2
	&& l.Uses == 2
	&& clobber(l) =>
	@l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)

// Convert ANDload to MOVload when we can do the AND in a containing TEST op.
// Only do when it's within the same block, so we don't have flags live across basic block boundaries.
// See issue 44228.
(TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x)

(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVLload <t> [off] {sym} (SB) _) && symIsRO(sym) && is32BitInt(t) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVLload <t> [off] {sym} (SB) _) && symIsRO(sym) && is64BitInt(t) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int8(read8(sym, int64(off))))])
(MOVWQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVLQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])

(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
	(MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
	(MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))

// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, memory version.
(SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
	&& sc.Val64() >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
	&& isInlinableMemmove(dst, src, sc.Val64(), config)
	&& clobber(s1, s2, s3, call)
	=> (Move [sc.Val64()] dst src mem)

// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)
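// For example, a small fixed-size copy such as copy(dst[:16], src[:16]) can
// lower to a Move instead of a runtime.memmove call (illustrative; gated by
// isInlinableMemmove's size and aliasing checks).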

// Prefetch instructions
(PrefetchCache ...) => (PrefetchT0 ...)
(PrefetchCacheStreamed ...) => (PrefetchNTA ...)

// CPUID feature: BMI1.
(AND(Q|L) x (NOT(Q|L) y)) && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y)
(AND(Q|L) x (NEG(Q|L) x)) && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x)
(XOR(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x)
(AND(Q|L) <t> x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (Select0 <t> (BLSR(Q|L) x))
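// BMI1 identities matched above: x &^ y (ANDN), x & -x (BLSI, isolate lowest
// set bit), x ^ (x-1) (BLSMSK), and x & (x-1) (BLSR, clear lowest set bit).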
// eliminate TEST instruction in classical "isPowerOfTwo" check
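// e.g. the Go expression x&(x-1) == 0: BLSR already computes x&(x-1) and sets
// ZF from the result, so the flags can come straight from BLSR's Select1
// output instead of a separate TEST.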
(SETEQ (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (SETEQ (Select1 <types.TypeFlags> blsr))
(CMOVQEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
(CMOVLEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
(EQ (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (EQ (Select1 <types.TypeFlags> blsr) yes no)
(SETNE (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (SETNE (Select1 <types.TypeFlags> blsr))
(CMOVQNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
(CMOVLNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
(NE (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (NE (Select1 <types.TypeFlags> blsr) yes no)

(BSWAP(Q|L) (BSWAP(Q|L) p)) => p

// CPUID feature: MOVBE.
(MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
(MOVBE(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 => (MOV(Q|L)store [i] {s} p w mem)
(BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem)) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => @x.Block (MOVBE(Q|L)load [i] {s} p mem)
(BSWAP(Q|L) x:(MOVBE(Q|L)load [i] {s} p mem)) && x.Uses == 1 => @x.Block (MOV(Q|L)load [i] {s} p mem)
(MOVWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBEWstore [i] {s} p w mem)
(MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 => (MOVWstore [i] {s} p w mem)

(SAR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
(SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
(SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)

((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))

// Convert atomic logical operations to easier ones if we don't use the result.
(Select1 a:(LoweredAtomic(And64|And32|Or64|Or32) ptr val mem)) && a.Uses == 1 && clobber(a) => ((ANDQ|ANDL|ORQ|ORL)lock ptr val mem)

// If we are checking the results of an add, use the flags directly from the add.
// Note that this only works for EQ/NE. ADD sets the CF/OF flags differently
// than TEST sets them.
// Note also that a.Args[0] here refers to the post-flagify'd value.
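// e.g. a check like "if x+13 == 0" can branch on the flag result of the
// flagified add directly, instead of computing x+13 and then TESTing it
// (illustrative).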
((EQ|NE) t:(TESTQ a:(ADDQconst [c] x) a)) && t.Uses == 1 && flagify(a) => ((EQ|NE) (Select1 <types.TypeFlags> a.Args[0]))
((EQ|NE) t:(TESTL a:(ADDLconst [c] x) a)) && t.Uses == 1 && flagify(a) => ((EQ|NE) (Select1 <types.TypeFlags> a.Args[0]))

// If we don't use the flags any more, just use the standard op.
(Select0 a:(ADD(Q|L)constflags [c] x)) && a.Uses == 1 => (ADD(Q|L)const [c] x)

// SIMD lowering rules

// Mask conversions
// integers to masks
(Cvt16toMask8x16 <t> x) => (VPMOVMToVec8x16 <types.TypeVec128> (KMOVWk <t> x))
(Cvt32toMask8x32 <t> x) => (VPMOVMToVec8x32 <types.TypeVec256> (KMOVDk <t> x))
(Cvt64toMask8x64 <t> x) => (VPMOVMToVec8x64 <types.TypeVec512> (KMOVQk <t> x))

(Cvt8toMask16x8 <t> x) => (VPMOVMToVec16x8 <types.TypeVec128> (KMOVBk <t> x))
(Cvt16toMask16x16 <t> x) => (VPMOVMToVec16x16 <types.TypeVec256> (KMOVWk <t> x))
(Cvt32toMask16x32 <t> x) => (VPMOVMToVec16x32 <types.TypeVec512> (KMOVDk <t> x))

(Cvt8toMask32x4 <t> x) => (VPMOVMToVec32x4 <types.TypeVec128> (KMOVBk <t> x))
(Cvt8toMask32x8 <t> x) => (VPMOVMToVec32x8 <types.TypeVec256> (KMOVBk <t> x))
(Cvt16toMask32x16 <t> x) => (VPMOVMToVec32x16 <types.TypeVec512> (KMOVWk <t> x))

(Cvt8toMask64x2 <t> x) => (VPMOVMToVec64x2 <types.TypeVec128> (KMOVBk <t> x))
(Cvt8toMask64x4 <t> x) => (VPMOVMToVec64x4 <types.TypeVec256> (KMOVBk <t> x))
(Cvt8toMask64x8 <t> x) => (VPMOVMToVec64x8 <types.TypeVec512> (KMOVBk <t> x))

// masks to integers
(CvtMask8x16to16 ...) => (VPMOVMSKB128 ...)
(CvtMask8x32to32 ...) => (VPMOVMSKB256 ...)
(CvtMask8x64to64 x) => (KMOVQi (VPMOVVec8x64ToM <types.TypeMask> x))

(CvtMask16x8to8 x) => (KMOVBi (VPMOVVec16x8ToM <types.TypeMask> x))
(CvtMask16x16to16 x) => (KMOVWi (VPMOVVec16x16ToM <types.TypeMask> x))
(CvtMask16x32to32 x) => (KMOVDi (VPMOVVec16x32ToM <types.TypeMask> x))

(CvtMask32x4to8 ...) => (VMOVMSKPS128 ...)
(CvtMask32x8to8 ...) => (VMOVMSKPS256 ...)
(CvtMask32x16to16 x) => (KMOVWi (VPMOVVec32x16ToM <types.TypeMask> x))

(CvtMask64x2to8 ...) => (VMOVMSKPD128 ...)
(CvtMask64x4to8 ...) => (VMOVMSKPD256 ...)
(CvtMask64x8to8 x) => (KMOVBi (VPMOVVec64x8ToM <types.TypeMask> x))

// optimizations
(MOVBstore [off] {sym} ptr (KMOVBi mask) mem) => (KMOVBstore [off] {sym} ptr mask mem)
(MOVWstore [off] {sym} ptr (KMOVWi mask) mem) => (KMOVWstore [off] {sym} ptr mask mem)
(MOVLstore [off] {sym} ptr (KMOVDi mask) mem) => (KMOVDstore [off] {sym} ptr mask mem)
(MOVQstore [off] {sym} ptr (KMOVQi mask) mem) => (KMOVQstore [off] {sym} ptr mask mem)

(KMOVBk l:(MOVBload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVBload [off] {sym} ptr mem)
(KMOVWk l:(MOVWload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVWload [off] {sym} ptr mem)
(KMOVDk l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVDload [off] {sym} ptr mem)
(KMOVQk l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l) && clobber(l) => (KMOVQload [off] {sym} ptr mem)

// SIMD vector loads and stores
(Load <t> ptr mem) && t.Size() == 16 => (VMOVDQUload128 ptr mem)
(Store {t} ptr val mem) && t.Size() == 16 => (VMOVDQUstore128 ptr val mem)

(Load <t> ptr mem) && t.Size() == 32 => (VMOVDQUload256 ptr mem)
(Store {t} ptr val mem) && t.Size() == 32 => (VMOVDQUstore256 ptr val mem)

(Load <t> ptr mem) && t.Size() == 64 => (VMOVDQUload512 ptr mem)
(Store {t} ptr val mem) && t.Size() == 64 => (VMOVDQUstore512 ptr val mem)

// SIMD vector integer-vector-masked loads and stores.
(LoadMasked32 <t> ptr mask mem) && t.Size() == 16 => (VPMASK32load128 ptr mask mem)
(LoadMasked32 <t> ptr mask mem) && t.Size() == 32 => (VPMASK32load256 ptr mask mem)
(LoadMasked64 <t> ptr mask mem) && t.Size() == 16 => (VPMASK64load128 ptr mask mem)
(LoadMasked64 <t> ptr mask mem) && t.Size() == 32 => (VPMASK64load256 ptr mask mem)

(StoreMasked32 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK32store128 ptr mask val mem)
(StoreMasked32 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK32store256 ptr mask val mem)
(StoreMasked64 {t} ptr mask val mem) && t.Size() == 16 => (VPMASK64store128 ptr mask val mem)
(StoreMasked64 {t} ptr mask val mem) && t.Size() == 32 => (VPMASK64store256 ptr mask val mem)

// Misc
(IsZeroVec x) => (SETEQ (VPTEST x x))

(IsNaNFloat32x4 x) => (VCMPPS128 [3] x x)
(IsNaNFloat32x8 x) => (VCMPPS256 [3] x x)
(IsNaNFloat32x16 x) => (VPMOVMToVec32x16 (VCMPPS512 [3] x x))
(IsNaNFloat64x2 x) => (VCMPPD128 [3] x x)
(IsNaNFloat64x4 x) => (VCMPPD256 [3] x x)
(IsNaNFloat64x8 x) => (VPMOVMToVec64x8 (VCMPPD512 [3] x x))

// SIMD vector K-masked loads and stores

(LoadMasked64 <t> ptr mask mem) && t.Size() == 64 => (VPMASK64load512 ptr (VPMOVVec64x8ToM <types.TypeMask> mask) mem)
(LoadMasked32 <t> ptr mask mem) && t.Size() == 64 => (VPMASK32load512 ptr (VPMOVVec32x16ToM <types.TypeMask> mask) mem)
(LoadMasked16 <t> ptr mask mem) && t.Size() == 64 => (VPMASK16load512 ptr (VPMOVVec16x32ToM <types.TypeMask> mask) mem)
(LoadMasked8 <t> ptr mask mem) && t.Size() == 64 => (VPMASK8load512 ptr (VPMOVVec8x64ToM <types.TypeMask> mask) mem)

(StoreMasked64 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK64store512 ptr (VPMOVVec64x8ToM <types.TypeMask> mask) val mem)
(StoreMasked32 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK32store512 ptr (VPMOVVec32x16ToM <types.TypeMask> mask) val mem)
(StoreMasked16 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK16store512 ptr (VPMOVVec16x32ToM <types.TypeMask> mask) val mem)
(StoreMasked8 {t} ptr mask val mem) && t.Size() == 64 => (VPMASK8store512 ptr (VPMOVVec8x64ToM <types.TypeMask> mask) val mem)

(ZeroSIMD <t>) && t.Size() == 16 => (Zero128 <t>)
(ZeroSIMD <t>) && t.Size() == 32 => (Zero256 <t>)
(ZeroSIMD <t>) && t.Size() == 64 => (Zero512 <t>)

(VPMOVVec8x16ToM (VPMOVMToVec8x16 x)) => x
(VPMOVVec8x32ToM (VPMOVMToVec8x32 x)) => x
(VPMOVVec8x64ToM (VPMOVMToVec8x64 x)) => x

(VPMOVVec16x8ToM (VPMOVMToVec16x8 x)) => x
(VPMOVVec16x16ToM (VPMOVMToVec16x16 x)) => x
(VPMOVVec16x32ToM (VPMOVMToVec16x32 x)) => x

(VPMOVVec32x4ToM (VPMOVMToVec32x4 x)) => x
(VPMOVVec32x8ToM (VPMOVMToVec32x8 x)) => x
(VPMOVVec32x16ToM (VPMOVMToVec32x16 x)) => x

(VPMOVVec64x2ToM (VPMOVMToVec64x2 x)) => x
(VPMOVVec64x4ToM (VPMOVMToVec64x4 x)) => x
(VPMOVVec64x8ToM (VPMOVMToVec64x8 x)) => x

(VPANDQ512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k)
(VPANDQ512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k)
(VPANDQ512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k)
(VPANDQ512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k)
(VPANDD512 x (VPMOVMToVec64x8 k)) => (VMOVDQU64Masked512 x k)
(VPANDD512 x (VPMOVMToVec32x16 k)) => (VMOVDQU32Masked512 x k)
(VPANDD512 x (VPMOVMToVec16x32 k)) => (VMOVDQU16Masked512 x k)
(VPANDD512 x (VPMOVMToVec8x64 k)) => (VMOVDQU8Masked512 x k)

(VPAND128 x (VPMOVMToVec8x16 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU8Masked128 x k)
(VPAND128 x (VPMOVMToVec16x8 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU16Masked128 x k)
(VPAND128 x (VPMOVMToVec32x4 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU32Masked128 x k)
(VPAND128 x (VPMOVMToVec64x2 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU64Masked128 x k)

(VPAND256 x (VPMOVMToVec8x32 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU8Masked256 x k)
(VPAND256 x (VPMOVMToVec16x16 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU16Masked256 x k)
(VPAND256 x (VPMOVMToVec32x8 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU32Masked256 x k)
(VPAND256 x (VPMOVMToVec64x4 k)) && v.Block.CPUfeatures.hasFeature(CPUavx512) => (VMOVDQU64Masked256 x k)

// Inserting a 32/64-bit float into a zero vector is just MOVS(S|D); for integers it is MOVD/MOVQ.
(VPINSRQ128 [0] (Zero128 <t>) y) && y.Type.IsFloat() => (VMOVSDf2v <types.TypeVec128> y)
(VPINSRD128 [0] (Zero128 <t>) y) && y.Type.IsFloat() => (VMOVSSf2v <types.TypeVec128> y)
(VPINSRQ128 [0] (Zero128 <t>) y) && !y.Type.IsFloat() => (VMOVQ <types.TypeVec128> y)
(VPINSRD128 [0] (Zero128 <t>) y) && !y.Type.IsFloat() => (VMOVD <types.TypeVec128> y)

// These rewrites can skip zero-extending the 8/16-bit inputs because they are
// only used as the input to a broadcast; the potentially "bad" bits are ignored.
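// e.g. when broadcasting a byte, the VPINSRB zero-extension is unnecessary:
// VPBROADCASTB reads only the low byte of its source (illustrative).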
(VPBROADCASTB(128|256|512) x:(VPINSRB128 [0] (Zero128 <t>) y)) && x.Uses == 1 =>
	(VPBROADCASTB(128|256|512) (VMOVQ <types.TypeVec128> y))
(VPBROADCASTW(128|256|512) x:(VPINSRW128 [0] (Zero128 <t>) y)) && x.Uses == 1 =>
	(VPBROADCASTW(128|256|512) (VMOVQ <types.TypeVec128> y))

(VMOVQ x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVQload <v.Type> [off] {sym} ptr mem)
(VMOVD x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVDload <v.Type> [off] {sym} ptr mem)

(VMOVSDf2v x:(MOVSDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVSDload <v.Type> [off] {sym} ptr mem)
(VMOVSSf2v x:(MOVSSload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (VMOVSSload <v.Type> [off] {sym} ptr mem)

(VMOVSDf2v x:(MOVSDconst [c])) => (VMOVSDconst [c])
(VMOVSSf2v x:(MOVSSconst [c])) => (VMOVSSconst [c])

(VMOVDQUload(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (VMOVDQUload(128|256|512) [off1+off2] {sym} ptr mem)
(VMOVDQUstore(128|256|512) [off1] {sym} x:(ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (VMOVDQUstore(128|256|512) [off1+off2] {sym} ptr val mem)
(VMOVDQUload(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => (VMOVDQUload(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base mem)
(VMOVDQUstore(128|256|512) [off1] {sym1} x:(LEAQ [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => (VMOVDQUstore(128|256|512) [off1+off2] {mergeSym(sym1, sym2)} base val mem)

// 2-op VPTEST optimizations
(SETEQ (VPTEST x:(VPAND(128|256) j k) y)) && x == y && x.Uses == 2 => (SETEQ (VPTEST j k))
(SETEQ (VPTEST x:(VPAND(D|Q)512 j k) y)) && x == y && x.Uses == 2 => (SETEQ (VPTEST j k))
(SETEQ (VPTEST x:(VPANDN(128|256) j k) y)) && x == y && x.Uses == 2 => (SETB (VPTEST k j)) // AndNot has swapped its operand order
(SETEQ (VPTEST x:(VPANDN(D|Q)512 j k) y)) && x == y && x.Uses == 2 => (SETB (VPTEST k j)) // AndNot has swapped its operand order
(EQ (VPTEST x:(VPAND(128|256) j k) y) yes no) && x == y && x.Uses == 2 => (EQ (VPTEST j k) yes no)
(EQ (VPTEST x:(VPAND(D|Q)512 j k) y) yes no) && x == y && x.Uses == 2 => (EQ (VPTEST j k) yes no)
(EQ (VPTEST x:(VPANDN(128|256) j k) y) yes no) && x == y && x.Uses == 2 => (ULT (VPTEST k j) yes no) // AndNot has swapped its operand order
(EQ (VPTEST x:(VPANDN(D|Q)512 j k) y) yes no) && x == y && x.Uses == 2 => (ULT (VPTEST k j) yes no) // AndNot has swapped its operand order

// optimize x.IsNaN().Or(y.IsNaN())
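// Predicate 3 is "unordered": x.IsNaN() lowers to a self-compare (x unord x).
// Since (x unord x) || (y unord y) == (x unord y), the two compares fuse into
// one unordered compare of x with y.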
(VPOR128 (VCMPP(S|D)128 [3] x x) (VCMPP(S|D)128 [3] y y)) => (VCMPP(S|D)128 [3] x y)
(VPOR256 (VCMPP(S|D)256 [3] x x) (VCMPP(S|D)256 [3] y y)) => (VCMPP(S|D)256 [3] x y)
(VPORD512 (VPMOVMToVec32x16 (VCMPPS512 [3] x x)) (VPMOVMToVec32x16 (VCMPPS512 [3] y y))) =>
	(VPMOVMToVec32x16 (VCMPPS512 [3] x y))
(VPORD512 (VPMOVMToVec64x8 (VCMPPD512 [3] x x)) (VPMOVMToVec64x8 (VCMPPD512 [3] y y))) =>
	(VPMOVMToVec64x8 (VCMPPD512 [3] x y))