// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(64|32)F ...) => (FADD(D|S) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(64|32)F ...) => (FSUB(D|S) ...)

(Mul64 ...) => (MUL ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)
(Mul64uover ...) => (LoweredMuluover ...)
(Mul(32|16|8) ...) => (MULW ...)
(Mul(64|32)F ...) => (FMUL(D|S) ...)

(Div(64|32)F ...) => (FDIV(D|S) ...)

(Div64 x y [false]) => (DIV x y)
(Div64u ...) => (DIVU ...)
(Div32 x y [false]) => (DIVW x y)
(Div32u ...) => (DIVUW ...)
(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64 ...) => (MULH ...)
(Hmul64u ...) => (MULHU ...)
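// There is no 32-bit high-multiply instruction, so widen to a full 64-bit
// multiply and take the upper word, e.g.
// Hmul32(x,y) => (int64(x) * int64(y)) >> 32.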
(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))

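// 64-bit add-with-carry / subtract-with-borrow. The flag is computed from
// unsigned comparisons: for s = x + y, SLTU s x is 1 iff the addition wrapped,
// and likewise for s + c. At most one of the two additions can wrap, so the
// comparisons are combined with OR. Sub64borrow is symmetric, with the
// comparison operands swapped.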
(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
(Select1 (Add64carry x y c)) =>
	(OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))

(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
(Select1 (Sub64borrow x y c)) =>
	(OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))

// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
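// This form avoids computing x + y, which could overflow 64 bits; the final
// (x & y & 1) term restores the bit dropped when both halves round down.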
(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))

(Mod64 x y [false]) => (REM x y)
(Mod64u ...) => (REMU ...)
(Mod32 x y [false]) => (REMW x y)
(Mod32u ...) => (REMUW ...)
(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(64|32)F ...) => (FNEG(D|S) ...)

(Com(64|32|16|8) ...) => (NOT ...)

(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)

(Copysign ...) => (FSGNJD ...)

(Abs ...) => (FABSD ...)

(FMA ...) => (FMADDD ...)

(Min(64|32)F ...) => (LoweredFMIN(D|S) ...)
(Max(64|32)F ...) => (LoweredFMAX(D|S) ...)

// Sign and zero extension.

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
(Cvt64to32F ...) => (FCVTSL ...)
(Cvt64to64F ...) => (FCVTDL ...)

(Cvt32Fto32 ...) => (FCVTWS ...)
(Cvt32Fto64 ...) => (FCVTLS ...)
(Cvt64Fto32 ...) => (FCVTWD ...)
(Cvt64Fto64 ...) => (FCVTLD ...)

(Cvt32Fto64F ...) => (FCVTDS ...)
(Cvt64Fto32F ...) => (FCVTSD ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

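// Slicemask lowers to (-x) >> 63 (arithmetic shift). For the non-negative
// lengths it is applied to, NEG sets the sign bit exactly when x != 0, and the
// shift then smears that bit across the register, yielding 0 or all ones.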
(Slicemask <t> x) => (SRAI [63] (NEG <t> x))

// Truncations
// We ignore the unused high parts of registers, so truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Shifts

// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
//
// Breaking down the operation:
//
// (SLL x y) generates x << (y & 63).
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
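// For example, with y == 70, SLL alone would compute x << (70 & 63) = x << 6,
// but SLTIU [64] y is 0, so the mask is 0 and the AND zeroes the result.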
(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)

// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
// bottom 5 bits of y. Ensure that the result is always zero if the shift
// exceeds the maximum value (63 for SRL, 31 for SRLW). See Lsh above for a
// detailed description.
(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)

// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
// depending on the instruction), the result of the shift should be either 0
// or -1 based on the sign bit of x.
//
// We implement this by performing the max shift (-1) if y > the maximum value.
//
// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
// before passing it to SRAW.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 5 or 6 bits SRAW and SRA care about.
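// For example, with y == 70 the mask (uint64(70 < 64) - 1) is all ones, so the
// ORed shift amount has its low 6 bits set and SRA shifts by 63, leaving just
// the sign bit of x replicated across the result.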
(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))

(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)

// Rotates.
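// There are no 8 or 16 bit rotate instructions, so the rotation is built from
// two shifts: RotateLeft8(x,y) => x << (y & 7) | uint8(x) >> (-y & 7).
// The right shift operand is zero-extended so no stray high bits enter the OR.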
(RotateLeft8 <t> x y) => (OR (SLL <t> x (ANDI [7] <y.Type> y)) (SRL <t> (ZeroExt8to64 x) (ANDI [7] <y.Type> (NEG <y.Type> y))))
(RotateLeft16 <t> x y) => (OR (SLL <t> x (ANDI [15] <y.Type> y)) (SRL <t> (ZeroExt16to64 x) (ANDI [15] <y.Type> (NEG <y.Type> y))))
(RotateLeft32 ...) => (ROLW ...)
(RotateLeft64 ...) => (ROL ...)

// Count trailing zeros (note that these will only be emitted for rva22u64 and above).
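// For the narrow types, ORing in a bit just above the value caps the count at
// the type width: e.g. Ctz16(0) counts the trailing zeros of 0x10000, giving 16.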
(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
(Ctz64 ...) => (CTZ ...)
(Ctz32 ...) => (CTZW ...)
(Ctz16 x) => (CTZW (ORI <typ.UInt32> [1<<16] x))
(Ctz8 x) => (CTZW (ORI <typ.UInt32> [1<<8] x))

// Bit length (note that these will only be emitted for rva22u64 and above).
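// BitLen is the position of the highest set bit, i.e. width minus leading
// zeros: BitLen64(1) = 64 - 63 = 1 and BitLen64(0) = 64 - 64 = 0.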
(BitLen64 <t> x) => (SUB (MOVDconst [64]) (CLZ <t> x))
(BitLen32 <t> x) => (SUB (MOVDconst [32]) (CLZW <t> x))
(BitLen16 x) => (BitLen64 (ZeroExt16to64 x))
(BitLen8 x) => (BitLen64 (ZeroExt8to64 x))

// Byte swap (note that these will only be emitted for rva22u64 and above).
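// REV8 reverses all eight bytes of the register, leaving the narrower swapped
// value in the high bytes; the logical right shift moves it back down,
// zero-extending the result.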
(Bswap64 ...) => (REV8 ...)
(Bswap32 <t> x) => (SRLI [32] (REV8 <t> x))
(Bswap16 <t> x) => (SRLI [48] (REV8 <t> x))

// Population count (note that these will be emitted with guards for rva20u64).
(PopCount64 ...) => (CPOP ...)
(PopCount32 ...) => (CPOPW ...)
(PopCount16 x) => (CPOP (ZeroExt16to64 x))
(PopCount8 x) => (CPOP (ZeroExt8to64 x))

(Less64 ...) => (SLT ...)
(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
(Less64U ...) => (SLTU ...)
(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Less(64|32)F ...) => (FLT(D|S) ...)

// Convert x <= y to !(y < x).
(Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x))
(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
(Leq(64|32)F ...) => (FLE(D|S) ...)

(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
(Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq(64|32)F ...) => (FEQ(D|S) ...)

(NeqPtr x y) => (Not (EqPtr x y))
(Neq64 x y) => (Not (Eq64 x y))
(Neq32 x y) => (Not (Eq32 x y))
(Neq16 x y) => (Not (Eq16 x y))
(Neq8 x y) => (Not (Eq8 x y))
(Neq(64|32)F ...) => (FNE(D|S) ...)

// Loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// Stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)

// We need to fold MOVaddr into the load/store ops so that the live variable
// analysis knows what variables are being read/written by the ops.
(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)

(FMOV(W|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOV(W|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(MOV(B|H|W|D)storezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
	canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) &&
	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)

(FMOV(W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOV(W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)

(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)

(FMOV(W|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(FMOV(W|D)load [off1+int32(off2)] {sym} base mem)

(MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)

(MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)

(FMOV(W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(FMOV(W|D)store [off1+int32(off2)] {sym} base val mem)

// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)

// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVDconst [0])
		(MOVBstore ptr (MOVDconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVDconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVDconst [0])
		(MOVBstore [2] ptr (MOVDconst [0])
			(MOVBstore [1] ptr (MOVDconst [0])
				(MOVBstore ptr (MOVDconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVDconst [0])
		(MOVHstore [4] ptr (MOVDconst [0])
			(MOVHstore [2] ptr (MOVDconst [0])
				(MOVHstore ptr (MOVDconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVBstore [1] ptr (MOVDconst [0])
			(MOVBstore ptr (MOVDconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVHstore [2] ptr (MOVDconst [0])
			(MOVHstore ptr (MOVDconst [0]) mem)))

// Unroll medium-size zeroing (at most 192 bytes, i.e. 3 cache lines).
(Zero [s] {t} ptr mem) && s <= 24*moveSize(t.Alignment(), config) =>
	(LoweredZero [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)

// Larger zeroing uses a loop.
(Zero [s] {t} ptr mem) && s > 24*moveSize(t.Alignment(), config) =>
	(LoweredZeroLoop [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)

// Checks
(IsNonNil ...) => (SNEZ ...)
(IsInBounds ...) => (Less64U ...)
(IsSliceInBounds ...) => (Leq64U ...)

// Trivial lowering
(NilCheck ...) => (LoweredNilCheck ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)

// Small moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))

// Unroll moves of medium size (at most 192 bytes, i.e. 3 cache lines).
(Move [s] {t} dst src mem) && s > 0 && s <= 3*8*moveSize(t.Alignment(), config)
	&& logLargeCopy(v, s) =>
	(LoweredMove [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)

// Larger moves use a loop.
(Move [s] {t} dst src mem) && s > 3*8*moveSize(t.Alignment(), config)
	&& logLargeCopy(v, s) =>
	(LoweredMoveLoop [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (SEQZ (SUB <typ.Bool> x y))
(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
(Not ...) => (SEQZ ...)

// Lowering pointer arithmetic
// TODO: Special handling for SP offsets, like ARM
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(64|32)F ...) => (FMOV(D|F)const ...)
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])

(Addr {sym} base) => (MOVaddr {sym} [0] base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVaddr {sym} base)

// Calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Atomic Intrinsics
(AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...)
(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
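// For example, for a byte at offset 1 (ptr & 3 == 1) the 32-bit AND operand is
// ^((^val & 0xff) << 8): all ones except in byte 1, so the AND leaves the
// other three bytes unchanged.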
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
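// No inversion is needed for OR: the shifted value has zeros in the other
// three bytes, which OR leaves unchanged.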
(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)

(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Conditional branches
(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)

// Optimizations

// Absorb SEQZ/SNEZ into branch.
(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)

// Remove redundant NEG from BEQZ/BNEZ.
(BEQZ (NEG x) yes no) => (BEQZ x yes no)
(BNEZ (NEG x) yes no) => (BNEZ x yes no)

// Negate comparison with FNES/FNED.
(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)

// Convert BEQZ/BNEZ into more optimal branch conditions.
(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
(BNEZ (SUB x y) yes no) => (BNE x y yes no)
(BEQZ (SLT x y) yes no) => (BGE x y yes no)
(BNEZ (SLT x y) yes no) => (BLT x y yes no)
(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)

// Convert branches against a zero operand to the more optimal branch-on-zero forms.
(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
(BLTU (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)
(BGEU (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)

// Remove redundant NEG from SEQZ/SNEZ.
(SEQZ (NEG x)) => (SEQZ x)
(SNEZ (NEG x)) => (SNEZ x)

// Remove redundant SEQZ/SNEZ.
(SEQZ (SEQZ x)) => (SNEZ x)
(SEQZ (SNEZ x)) => (SEQZ x)
(SNEZ (SEQZ x)) => (SEQZ x)
(SNEZ (SNEZ x)) => (SNEZ x)

// Store zero.
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)

// Boolean ops are already extended.
(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
(MOVBUreg x:((SEQZ|SNEZ) _)) => x
(MOVBUreg x:((SLT|SLTU) _ _)) => x

// Avoid extending when already sufficiently masked.
(MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Combine masking and zero extension.
(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)

// Combine negation and sign extension.
(MOVWreg (NEG x)) => (NEGW x)

// Avoid sign/zero extension for consts.
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Avoid sign/zero extension after properly typed load.
(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)

// Avoid zero extension after properly typed atomic operation.
(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)

// Avoid sign extension after word arithmetic.
(MOVWreg x:(ADDIW _)) => (MOVDreg x)
(MOVWreg x:(SUBW _ _)) => (MOVDreg x)
(MOVWreg x:(NEGW _)) => (MOVDreg x)
(MOVWreg x:(MULW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
(MOVWreg x:(REMW _ _)) => (MOVDreg x)
(MOVWreg x:(REMUW _ _)) => (MOVDreg x)
(MOVWreg x:(ROLW _ _)) => (MOVDreg x)
(MOVWreg x:(RORW _ _)) => (MOVDreg x)
(MOVWreg x:(RORIW _)) => (MOVDreg x)

// Fold double extensions.
(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)

// Do not extend before store.
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// Replace extend after load with alternate load where possible.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// Replace load from same location as preceding store with copy.
(MOV(D|W|H|B)load [off] {sym} ptr1 (MOV(D|W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(D|W|H|B)reg x)
(MOV(W|H|B)Uload [off] {sym} ptr1 (MOV(W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(W|H|B)Ureg x)
(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
(MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
(MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWUreg (FMVXS x))
(FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVSX x)

// If a register move has only 1 use, just use the same register without
// emitting an instruction.
// MOVDnop does not emit an instruction; it only exists to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// Avoid unnecessary zero and sign extension when right shifting.
(SRAI [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW [x] y)
(SRLI [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW [x] y)

// Replace right shifts that exceed size of signed type.
(SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y))
(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y))
(SRAI [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)

// Eliminate right shifts that exceed size of unsigned type.
(SRLI [x] (MOVBUreg y)) && x >= 8 => (MOVDconst [0])
(SRLI [x] (MOVHUreg y)) && x >= 16 => (MOVDconst [0])
(SRLI [x] (MOVWUreg y)) && x >= 32 => (MOVDconst [0])

// Fold constant into immediate instructions where possible.
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
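// A rotate left by val equals a rotate right by -val modulo the register
// width; there is no rotate-left-immediate instruction, hence the negated
// immediates in the two rules below.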
(ROL x (MOVDconst [val])) => (RORI [-val&63] x)
(ROLW x (MOVDconst [val])) => (RORIW [-val&31] x)
(ROR x (MOVDconst [val])) => (RORI [val&63] x)
(RORW x (MOVDconst [val])) => (RORIW [val&31] x)
(SLL x (MOVDconst [val])) => (SLLI [val&63] x)
(SLLW x (MOVDconst [val])) => (SLLIW [val&31] x)
(SRL x (MOVDconst [val])) => (SRLI [val&63] x)
(SRLW x (MOVDconst [val])) => (SRLIW [val&31] x)
(SRA x (MOVDconst [val])) => (SRAI [val&63] x)
(SRAW x (MOVDconst [val])) => (SRAIW [val&31] x)
(SLT x (MOVDconst [val])) && is12Bit(val) => (SLTI [val] x)
(SLTU x (MOVDconst [val])) && is12Bit(val) => (SLTIU [val] x)

// Replace negated left rotation with right rotation.
(ROL x (NEG y)) => (ROR x y)
(ROLW x (NEG y)) => (RORW x y)

// Generic simplifications
(ADD x (NEG y)) => (SUB x y)
(SUB x (NEG y)) => (ADD x y)
(SUB x x) => (MOVDconst [0])
(AND x x) => x
(OR x x) => x
(ORN x x) => (MOVDconst [-1])
(XOR x x) => (MOVDconst [0])

// Convert const subtraction into ADDI with negative immediate, where possible.
(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))

// Subtraction of zero.
(SUB x (MOVDconst [0])) => x
(SUBW x (MOVDconst [0])) => (ADDIW [0] x)

// Subtraction from zero.
(SUB (MOVDconst [0]) x) => (NEG x)
(SUBW (MOVDconst [0]) x) => (NEGW x)

// Fold negation into subtraction.
(NEG (SUB x y)) => (SUB y x)
(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))

// Double negation.
(NEG (NEG x)) => x
(NEG <t> s:(ADDI [val] (NEG x))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] x)

// Addition of zero or two constants.
(ADDI [0] x) => x
(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])

// ANDI with all zeros, all ones or two constants.
(ANDI [0] x) => (MOVDconst [0])
(ANDI [-1] x) => x
(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])

// ORI with all zeros, all ones or two constants.
(ORI [0] x) => x
(ORI [-1] x) => (MOVDconst [-1])
(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])

// Combine operations with immediate.
(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
(ORI [x] (ORI [y] z)) => (ORI [x | y] z)

// Negation of a constant.
(NEG (MOVDconst [x])) => (MOVDconst [-x])
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])

// Shift of a constant.
(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])

// Combine doubling via addition with shift.
(SLLI <t> [c] (ADD x x)) && c < t.Size() * 8 - 1 => (SLLI [c+1] x)
(SLLI <t> [c] (ADD x x)) && c >= t.Size() * 8 - 1 => (MOVDconst [0])

// SLTI/SLTIU with constants.
(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])

// SLTI/SLTIU with known outcomes.
(SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
(SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])

// SLT/SLTU with known outcomes.
(SLT x x) => (MOVDconst [0])
(SLTU x x) => (MOVDconst [0])

// Deadcode for LoweredMuluhilo
(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)

(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)

// Merge negation into fused multiply-add and multiply-subtract.
//
// Key:
//
//	[+ -](x * y [+ -] z).
//	 _ N         A S
//	             D U
//	             D B
//
// Note: multiplication commutativity handled by rule generator.
(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)

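// The rules below test for special float values using FCLASSD, which sets
// exactly one of ten bits describing its operand:
//
//	0: -∞    1: negative normal     2: negative subnormal  3: -0.0
//	4: +0.0  5: positive subnormal  6: positive normal     7: +∞
//	8: signaling NaN               9: quiet NaN
//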
// Test for -∞ (bit 0) using 64 bit classify instruction.
(FLTD x (FMOVDconst [-math.MaxFloat64])) => (ANDI [0b00_0000_0001] (FCLASSD x))
(FLED (FMOVDconst [-math.MaxFloat64]) x) => (SNEZ (ANDI <typ.Int64> [0b00_1111_1110] (FCLASSD x)))
(FEQD x (FMOVDconst [math.Inf(-1)])) => (ANDI [0b00_0000_0001] (FCLASSD x))
(FNED x (FMOVDconst [math.Inf(-1)])) => (SEQZ (ANDI <typ.Int64> [0b00_0000_0001] (FCLASSD x)))

// Test for +∞ (bit 7) using 64 bit classify instruction.
(FLTD (FMOVDconst [math.MaxFloat64]) x) => (SNEZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
(FLED x (FMOVDconst [math.MaxFloat64])) => (SNEZ (ANDI <typ.Int64> [0b00_0111_1111] (FCLASSD x)))
(FEQD x (FMOVDconst [math.Inf(1)])) => (SNEZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))
(FNED x (FMOVDconst [math.Inf(1)])) => (SEQZ (ANDI <typ.Int64> [0b00_1000_0000] (FCLASSD x)))

// Test for subnormal numbers using 64 bit classify instruction.
(FLTD x (FMOVDconst [+0x1p-1022])) => (SNEZ (ANDI <typ.Int64> [0b00_0011_1111] (FCLASSD x)))
(FLED (FMOVDconst [+0x1p-1022]) x) => (SNEZ (ANDI <typ.Int64> [0b00_1100_0000] (FCLASSD x)))
(FLED x (FMOVDconst [-0x1p-1022])) => (SNEZ (ANDI <typ.Int64> [0b00_0000_0011] (FCLASSD x)))
(FLTD (FMOVDconst [-0x1p-1022]) x) => (SNEZ (ANDI <typ.Int64> [0b00_1111_1100] (FCLASSD x)))

// Absorb unary sign bit operations into 64 bit classify instruction.
(S(EQ|NE)Z (ANDI [c] (FCLASSD (FNEGD x)))) => (S(EQ|NE)Z (ANDI <typ.Int64> [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)))
(S(EQ|NE)Z (ANDI [c] (FCLASSD (FABSD x)))) => (S(EQ|NE)Z (ANDI <typ.Int64> [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)))
(B(EQ|NE)Z (ANDI [c] (FCLASSD (FNEGD x))) yes no) => (B(EQ|NE)Z (ANDI <typ.Int64> [(c&0b11_0000_0000)|int64(bits.Reverse8(uint8(c))&0b1111_1111)] (FCLASSD x)) yes no)
(B(EQ|NE)Z (ANDI [c] (FCLASSD (FABSD x))) yes no) => (B(EQ|NE)Z (ANDI <typ.Int64> [(c&0b11_1111_0000)|int64(bits.Reverse8(uint8(c))&0b0000_1111)] (FCLASSD x)) yes no)

//
// Optimisations for rva22u64 and above.
//

// Combine left shift and addition.
(ADD (SLLI [1] x) y) && buildcfg.GORISCV64 >= 22 => (SH1ADD x y)
(ADD (SLLI [2] x) y) && buildcfg.GORISCV64 >= 22 => (SH2ADD x y)
(ADD (SLLI [3] x) y) && buildcfg.GORISCV64 >= 22 => (SH3ADD x y)

// Integer minimum and maximum.
(Min64 x y) && buildcfg.GORISCV64 >= 22 => (MIN x y)
(Max64 x y) && buildcfg.GORISCV64 >= 22 => (MAX x y)
(Min64u x y) && buildcfg.GORISCV64 >= 22 => (MINU x y)
(Max64u x y) && buildcfg.GORISCV64 >= 22 => (MAXU x y)