// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Simplifications that apply to all backend architectures. As an example, this
// Go source code
//
//	y := 0 * x
//
// can be translated into y := 0 without losing any information, which saves a
// pointless multiplication instruction. Other .rules files in this directory
// (for example AMD64.rules) contain rules specific to the architecture in the
// filename. The rules here apply to every architecture.
//
// The code for parsing this file lives in rulegen.go; this file generates
// ssa/rewritegeneric.go.
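//
// For illustration, a hedged sketch of the kind of Go code rulegen emits for a
// rule like (Trunc16to8 (Const16 [c])) => (Const8 [int8(c)]); the exact
// function and helper names here are assumptions about rulegen's output, not
// part of this file:
//
//	func rewriteValuegeneric_OpTrunc16to8(v *Value) bool {
//		v_0 := v.Args[0]
//		// match: (Trunc16to8 (Const16 [c]))
//		// result: (Const8 [int8(c)])
//		if v_0.Op == OpConst16 {
//			c := auxIntToInt16(v_0.AuxInt) // assumed helper: AuxInt -> int16
//			v.reset(OpConst8)
//			v.AuxInt = int8ToAuxInt(int8(c)) // assumed helper: int8 -> AuxInt
//			return true
//		}
//		return false
//	}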

// values are specified using the following format:
// (op <type> [auxint] {aux} arg0 arg1 ...)
// the type, aux, and auxint fields are optional
// on the matching side
//  - the type, aux, and auxint fields must match if they are specified.
//  - the first occurrence of a variable defines that variable. Subsequent
//    uses must match (be == to) the first use.
//  - v is defined to be the value matched.
//  - an additional conditional can be provided after the match pattern with "&&".
// on the generated side
//  - the type of the top-level expression is the same as the one on the left-hand side.
//  - the type of any subexpressions must be specified explicitly (or
//    be specified in the op's type field).
//  - auxint will be 0 if not specified.
//  - aux will be nil if not specified.
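//
// For example, the rule (further below)
// (Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d])
// matches a Div8 whose two arguments are Const8 values, binds their auxints to
// c and d, fires only when the "&&" condition d != 0 holds, and rewrites the
// matched value v into a Const8 whose auxint is c/d.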

// blocks are specified using the following format:
// (kind controlvalue succ0 succ1 ...)
// controlvalue must be "nil" or a value expression
// succ* fields must be variables
// For now, the generated successors must be a permutation of the matched successors.
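//
// For example, a block rule of the shape
// (If (Not cond) yes no) => (If cond no yes)
// matches an If block whose control value is a Not and regenerates the block
// with the two successors swapped; yes and no are the successor variables.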

// constant folding
(Trunc16to8 (Const16 [c])) => (Const8 [int8(c)])
(Trunc32to8 (Const32 [c])) => (Const8 [int8(c)])
(Trunc32to16 (Const32 [c])) => (Const16 [int16(c)])
(Trunc64to8 (Const64 [c])) => (Const8 [int8(c)])
(Trunc64to16 (Const64 [c])) => (Const16 [int16(c)])
(Trunc64to32 (Const64 [c])) => (Const32 [int32(c)])
(Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)])
(Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)])
(Cvt32to32F (Const32 [c])) => (Const32F [float32(c)])
(Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
(Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
(Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
(Round32F x:(Const32F)) => x
(Round64F x:(Const64F)) => x
(CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
(CvtBoolToUint8 (ConstBool [true])) => (Const8 [1])
(BitLen64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.Len64(uint64(c)))])
(BitLen32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.Len32(uint32(c)))])
(BitLen16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.Len16(uint16(c)))])
(BitLen8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.Len8(uint8(c)))])
(BitLen64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.Len64(uint64(c)))])
(BitLen32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.Len32(uint32(c)))])
(BitLen16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.Len16(uint16(c)))])
(BitLen8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.Len8(uint8(c)))])
(PopCount64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.OnesCount64(uint64(c)))])
(PopCount32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.OnesCount32(uint32(c)))])
(PopCount16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.OnesCount16(uint16(c)))])
(PopCount8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(bits.OnesCount8(uint8(c)))])
(PopCount64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.OnesCount64(uint64(c)))])
(PopCount32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.OnesCount32(uint32(c)))])
(PopCount16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.OnesCount16(uint16(c)))])
(PopCount8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(bits.OnesCount8(uint8(c)))])
(Add64carry (Const64 <t> [x]) (Const64 [y]) (Const64 [c])) && c >= 0 && c <= 1 => (MakeTuple (Const64 <t> [bitsAdd64(x, y, c).sum]) (Const64 <t> [bitsAdd64(x, y, c).carry]))
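// The bitsAdd64 helper above performs the 64-bit add-with-carry at rewrite
// time; it is assumed to behave like math/bits.Add64, with the low 64 bits of
// x+y+c in .sum and the carry-out (0 or 1) in .carry.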

(Trunc16to8 (ZeroExt8to16 x)) => x
(Trunc32to8 (ZeroExt8to32 x)) => x
(Trunc32to16 (ZeroExt8to32 x)) => (ZeroExt8to16 x)
(Trunc32to16 (ZeroExt16to32 x)) => x
(Trunc64to8 (ZeroExt8to64 x)) => x
(Trunc64to16 (ZeroExt8to64 x)) => (ZeroExt8to16 x)
(Trunc64to16 (ZeroExt16to64 x)) => x
(Trunc64to32 (ZeroExt8to64 x)) => (ZeroExt8to32 x)
(Trunc64to32 (ZeroExt16to64 x)) => (ZeroExt16to32 x)
(Trunc64to32 (ZeroExt32to64 x)) => x
(Trunc16to8 (SignExt8to16 x)) => x
(Trunc32to8 (SignExt8to32 x)) => x
(Trunc32to16 (SignExt8to32 x)) => (SignExt8to16 x)
(Trunc32to16 (SignExt16to32 x)) => x
(Trunc64to8 (SignExt8to64 x)) => x
(Trunc64to16 (SignExt8to64 x)) => (SignExt8to16 x)
(Trunc64to16 (SignExt16to64 x)) => x
(Trunc64to32 (SignExt8to64 x)) => (SignExt8to32 x)
(Trunc64to32 (SignExt16to64 x)) => (SignExt16to32 x)
(Trunc64to32 (SignExt32to64 x)) => x

(ZeroExt8to16 (Const8 [c])) => (Const16 [int16( uint8(c))])
(ZeroExt8to32 (Const8 [c])) => (Const32 [int32( uint8(c))])
(ZeroExt8to64 (Const8 [c])) => (Const64 [int64( uint8(c))])
(ZeroExt16to32 (Const16 [c])) => (Const32 [int32(uint16(c))])
(ZeroExt16to64 (Const16 [c])) => (Const64 [int64(uint16(c))])
(ZeroExt32to64 (Const32 [c])) => (Const64 [int64(uint32(c))])
(SignExt8to16 (Const8 [c])) => (Const16 [int16(c)])
(SignExt8to32 (Const8 [c])) => (Const32 [int32(c)])
(SignExt8to64 (Const8 [c])) => (Const64 [int64(c)])
(SignExt16to32 (Const16 [c])) => (Const32 [int32(c)])
(SignExt16to64 (Const16 [c])) => (Const64 [int64(c)])
(SignExt32to64 (Const32 [c])) => (Const64 [int64(c)])

(Neg8 (Const8 [c])) => (Const8 [-c])
(Neg16 (Const16 [c])) => (Const16 [-c])
(Neg32 (Const32 [c])) => (Const32 [-c])
(Neg64 (Const64 [c])) => (Const64 [-c])
(Neg32F (Const32F [c])) && c != 0 => (Const32F [-c])
(Neg64F (Const64F [c])) && c != 0 => (Const64F [-c])

(Add8 (Const8 [c]) (Const8 [d])) => (Const8 [c+d])
(Add16 (Const16 [c]) (Const16 [d])) => (Const16 [c+d])
(Add32 (Const32 [c]) (Const32 [d])) => (Const32 [c+d])
(Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d])
(Add32F (Const32F [c]) (Const32F [d])) && c+d == c+d => (Const32F [c+d])
(Add64F (Const64F [c]) (Const64F [d])) && c+d == c+d => (Const64F [c+d])
(AddPtr <t> x (Const64 [c])) => (OffPtr <t> x [c])
(AddPtr <t> x (Const32 [c])) => (OffPtr <t> x [int64(c)])

(Sub8 (Const8 [c]) (Const8 [d])) => (Const8 [c-d])
(Sub16 (Const16 [c]) (Const16 [d])) => (Const16 [c-d])
(Sub32 (Const32 [c]) (Const32 [d])) => (Const32 [c-d])
(Sub64 (Const64 [c]) (Const64 [d])) => (Const64 [c-d])
(Sub32F (Const32F [c]) (Const32F [d])) && c-d == c-d => (Const32F [c-d])
(Sub64F (Const64F [c]) (Const64F [d])) && c-d == c-d => (Const64F [c-d])

(Mul8 (Const8 [c]) (Const8 [d])) => (Const8 [c*d])
(Mul16 (Const16 [c]) (Const16 [d])) => (Const16 [c*d])
(Mul32 (Const32 [c]) (Const32 [d])) => (Const32 [c*d])
(Mul64 (Const64 [c]) (Const64 [d])) => (Const64 [c*d])
(Mul32F (Const32F [c]) (Const32F [d])) && c*d == c*d => (Const32F [c*d])
(Mul64F (Const64F [c]) (Const64F [d])) && c*d == c*d => (Const64F [c*d])
(Mul32uhilo (Const32 [c]) (Const32 [d])) => (MakeTuple (Const32 <typ.UInt32> [bitsMulU32(c, d).hi]) (Const32 <typ.UInt32> [bitsMulU32(c,d).lo]))
(Mul64uhilo (Const64 [c]) (Const64 [d])) => (MakeTuple (Const64 <typ.UInt64> [bitsMulU64(c, d).hi]) (Const64 <typ.UInt64> [bitsMulU64(c,d).lo]))
(Mul32uover (Const32 [c]) (Const32 [d])) => (MakeTuple (Const32 <typ.UInt32> [bitsMulU32(c, d).lo]) (ConstBool <typ.Bool> [bitsMulU32(c,d).hi != 0]))
(Mul64uover (Const64 [c]) (Const64 [d])) => (MakeTuple (Const64 <typ.UInt64> [bitsMulU64(c, d).lo]) (ConstBool <typ.Bool> [bitsMulU64(c,d).hi != 0]))

(And8 (Const8 [c]) (Const8 [d])) => (Const8 [c&d])
(And16 (Const16 [c]) (Const16 [d])) => (Const16 [c&d])
(And32 (Const32 [c]) (Const32 [d])) => (Const32 [c&d])
(And64 (Const64 [c]) (Const64 [d])) => (Const64 [c&d])

(Or8 (Const8 [c]) (Const8 [d])) => (Const8 [c|d])
(Or16 (Const16 [c]) (Const16 [d])) => (Const16 [c|d])
(Or32 (Const32 [c]) (Const32 [d])) => (Const32 [c|d])
(Or64 (Const64 [c]) (Const64 [d])) => (Const64 [c|d])

(Xor8 (Const8 [c]) (Const8 [d])) => (Const8 [c^d])
(Xor16 (Const16 [c]) (Const16 [d])) => (Const16 [c^d])
(Xor32 (Const32 [c]) (Const32 [d])) => (Const32 [c^d])
(Xor64 (Const64 [c]) (Const64 [d])) => (Const64 [c^d])

(Ctz64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz64(c))])
(Ctz32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz32(c))])
(Ctz16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz16(c))])
(Ctz8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz8(c))])

(Ctz64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz64(c))])
(Ctz32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz32(c))])
(Ctz16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz16(c))])
(Ctz8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz8(c))])

(Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d])
(Div16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c/d])
(Div32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c/d])
(Div64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c/d])
(Div8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c)/uint8(d))])
(Div16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c)/uint16(d))])
(Div32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c)/uint32(d))])
(Div64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c)/uint64(d))])
(Div32F (Const32F [c]) (Const32F [d])) && c/d == c/d => (Const32F [c/d])
(Div64F (Const64F [c]) (Const64F [d])) && c/d == c/d => (Const64F [c/d])
(Div128u <t> (Const64 [0]) lo y) => (MakeTuple (Div64u <t.FieldType(0)> lo y) (Mod64u <t.FieldType(1)> lo y))

(Not (ConstBool [c])) => (ConstBool [!c])

(Floor (Const64F [c])) => (Const64F [math.Floor(c)])
(Ceil (Const64F [c])) => (Const64F [math.Ceil(c)])
(Trunc (Const64F [c])) => (Const64F [math.Trunc(c)])
(RoundToEven (Const64F [c])) => (Const64F [math.RoundToEven(c)])

// Convert x * 1 to x.
(Mul(8|16|32|64) (Const(8|16|32|64) [1]) x) => x
(Mul(32|64)uover <t> (Const(32|64) [1]) x) => (MakeTuple x (ConstBool <t.FieldType(1)> [false]))

// Convert x * -1 to -x.
(Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x)

// Convert -x * c to x * -c
(Mul(8|16|32|64) (Const(8|16|32|64) <t> [c]) (Neg(8|16|32|64) x)) => (Mul(8|16|32|64) x (Const(8|16|32|64) <t> [-c]))

(Mul(8|16|32|64) (Neg(8|16|32|64) x) (Neg(8|16|32|64) y)) => (Mul(8|16|32|64) x y)

// simplify negation of a multiply when possible
(Neg(8|16|32|64) (Mul(8|16|32|64) x (Const(8|16|32|64) <t> [c]))) => (Mul(8|16|32|64) x (Const(8|16|32|64) <t> [-c]))
(Neg(8|16|32|64) (Mul(8|16|32|64) x (Neg(8|16|32|64) y))) => (Mul(8|16|32|64) x y)

// De Morgan's laws
(And(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (Or(8|16|32|64) <t> x y))
(Or(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (And(8|16|32|64) <t> x y))

(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d])
(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d])
(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d])
(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c % d])

(Mod8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c) % uint8(d))])
(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c) % uint16(d))])
(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c) % uint32(d))])
(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c) % uint64(d))])

(Lsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c << uint64(d)])
(Rsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c >> uint64(d)])
(Rsh64Ux64 (Const64 [c]) (Const64 [d])) => (Const64 [int64(uint64(c) >> uint64(d))])
(Lsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c << uint64(d)])
(Rsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c >> uint64(d)])
(Rsh32Ux64 (Const32 [c]) (Const64 [d])) => (Const32 [int32(uint32(c) >> uint64(d))])
(Lsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c << uint64(d)])
(Rsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c >> uint64(d)])
(Rsh16Ux64 (Const16 [c]) (Const64 [d])) => (Const16 [int16(uint16(c) >> uint64(d))])
(Lsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c << uint64(d)])
(Rsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c >> uint64(d)])
(Rsh8Ux64 (Const8 [c]) (Const64 [d])) => (Const8 [int8(uint8(c) >> uint64(d))])

// Fold IsInBounds when the range of the index cannot exceed the limit.
(IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c => (ConstBool [true])
(IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c => (ConstBool [true])
(IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c => (ConstBool [true])
(IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c => (ConstBool [true])
(IsInBounds x x) => (ConstBool [false])
(IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) && 0 <= c && c < d => (ConstBool [true])
(IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) && 0 <= c && int16(c) < d => (ConstBool [true])
(IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
(IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
(IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) && 0 <= c && c < d => (ConstBool [true])
(IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
(IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
(IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c < d => (ConstBool [true])
(IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
(IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c < d => (ConstBool [true])
(IsInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c < d])
(IsInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c < d])
// (Mod64u x y) is always between 0 (inclusive) and y (exclusive).
(IsInBounds (Mod32u _ y) y) => (ConstBool [true])
(IsInBounds (Mod64u _ y) y) => (ConstBool [true])
// Right shifting an unsigned number limits its value.
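// For example, with c=4, an 8-bit value shifted right by 4 is at most
// 1<<(8-4)-1 = 15, so any constant bound d > 15 is provably in range.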
(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1<<uint(64-c)-1 < d => (ConstBool [true])

(IsSliceInBounds x x) => (ConstBool [true])
(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d => (ConstBool [true])
(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c <= d => (ConstBool [true])
(IsSliceInBounds (Const32 [0]) _) => (ConstBool [true])
(IsSliceInBounds (Const64 [0]) _) => (ConstBool [true])
(IsSliceInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c <= d])
(IsSliceInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c <= d])
(IsSliceInBounds (SliceLen x) (SliceCap x)) => (ConstBool [true])

(Eq(64|32|16|8) x x) => (ConstBool [true])
(EqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c == d])
(EqB (ConstBool [false]) x) => (Not x)
(EqB (ConstBool [true]) x) => x

(Neq(64|32|16|8) x x) => (ConstBool [false])
(NeqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c != d])
(NeqB (ConstBool [false]) x) => x
(NeqB (ConstBool [true]) x) => (Not x)
(NeqB (Not x) (Not y)) => (NeqB x y)

(Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Eq64 (Const64 <t> [c-d]) x)
(Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Eq32 (Const32 <t> [c-d]) x)
(Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Eq16 (Const16 <t> [c-d]) x)
(Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Eq8 (Const8 <t> [c-d]) x)

(Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Neq64 (Const64 <t> [c-d]) x)
(Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Neq32 (Const32 <t> [c-d]) x)
(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Neq16 (Const16 <t> [c-d]) x)
(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Neq8 (Const8 <t> [c-d]) x)

(CondSelect x _ (ConstBool [true ])) => x
(CondSelect _ y (ConstBool [false])) => y
(CondSelect x x _) => x

// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
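// For example, 10 <= x && x < 20 becomes unsigned(x-10) < unsigned(10),
// collapsing two comparisons (and a short-circuit branch) into one.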
(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))

// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))

// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))

// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))

// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))

// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))

// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))

// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))

// single bit difference: ( x != c && x != d ) -> ( x|(c^d) != c )
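// For example, with c=6 and d=4: c|d == 6 == c and c^d == 2 has a single bit
// set, so x != 6 && x != 4 becomes (x|2) != 6.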
(AndB (Neq(64|32|16|8) x cv:(Const(64|32|16|8) [c])) (Neq(64|32|16|8) x (Const(64|32|16|8) [d]))) && c|d == c && oneBit(c^d) => (Neq(64|32|16|8) (Or(64|32|16|8) <x.Type> x (Const(64|32|16|8) <x.Type> [c^d])) cv)

// single bit difference: ( x == c || x == d ) -> ( x|(c^d) == c )
(OrB (Eq(64|32|16|8) x cv:(Const(64|32|16|8) [c])) (Eq(64|32|16|8) x (Const(64|32|16|8) [d]))) && c|d == c && oneBit(c^d) => (Eq(64|32|16|8) (Or(64|32|16|8) <x.Type> x (Const(64|32|16|8) <x.Type> [c^d])) cv)

// NaN check: ( x != x || x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) x) )
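// x != x is true exactly when x is NaN, and any ordered comparison involving
// NaN is false, so the whole disjunction is equivalent to the single negated
// comparison on the right-hand side.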
(OrB (Neq64F x x) ((Less|Leq)64F x y:(Const64F [c]))) => (Not ((Leq|Less)64F y x))
(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) x)) => (Not ((Leq|Less)64F x y))
(OrB (Neq32F x x) ((Less|Leq)32F x y:(Const32F [c]))) => (Not ((Leq|Less)32F y x))
(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) x)) => (Not ((Leq|Less)32F x y))

// NaN check: ( x != x || Abs(x) (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) Abs(x)) )
(OrB (Neq64F x x) ((Less|Leq)64F abs:(Abs x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y abs))
(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) abs:(Abs x))) => (Not ((Leq|Less)64F abs y))

// NaN check: ( x != x || -x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) -x) )
(OrB (Neq64F x x) ((Less|Leq)64F neg:(Neg64F x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y neg))
(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) neg:(Neg64F x))) => (Not ((Leq|Less)64F neg y))
(OrB (Neq32F x x) ((Less|Leq)32F neg:(Neg32F x) y:(Const32F [c]))) => (Not ((Leq|Less)32F y neg))
(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) neg:(Neg32F x))) => (Not ((Leq|Less)32F neg y))

// Canonicalize x-const to x+(-const)
(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 => (Add64 (Const64 <t> [-c]) x)
(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 => (Add32 (Const32 <t> [-c]) x)
(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 => (Add16 (Const16 <t> [-c]) x)
(Sub8 x (Const8 <t> [c])) && x.Op != OpConst8 => (Add8 (Const8 <t> [-c]) x)

// fold negation into comparison operators
(Not (Eq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Neq(64|32|16|8|B|Ptr|64F|32F) x y)
(Not (Neq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Eq(64|32|16|8|B|Ptr|64F|32F) x y)

(Not (Less(64|32|16|8) x y)) => (Leq(64|32|16|8) y x)
(Not (Less(64|32|16|8)U x y)) => (Leq(64|32|16|8)U y x)
(Not (Leq(64|32|16|8) x y)) => (Less(64|32|16|8) y x)
(Not (Leq(64|32|16|8)U x y)) => (Less(64|32|16|8)U y x)

// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
// a[i].b = ...; a[i+1].b = ...
// The !isPowerOfTwo check is a kludge that keeps a[i+1] indexed by a multiply,
// which turns into an index by a shift, which can use a shifted operand on ARM systems.
(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) && !isPowerOfTwo(c) =>
	(Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) && !isPowerOfTwo(c) =>
	(Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
(Mul16 (Const16 <t> [c]) (Add16 <t> (Const16 <t> [d]) x)) && !isPowerOfTwo(c) =>
	(Add16 (Const16 <t> [c*d]) (Mul16 <t> (Const16 <t> [c]) x))
(Mul8 (Const8 <t> [c]) (Add8 <t> (Const8 <t> [d]) x)) && !isPowerOfTwo(c) =>
	(Add8 (Const8 <t> [c*d]) (Mul8 <t> (Const8 <t> [c]) x))

// Rewrite x*y ± x*z to x*(y±z)
(Add(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
	=> (Mul(64|32|16|8) x (Add(64|32|16|8) <t> y z))
(Sub(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
	=> (Mul(64|32|16|8) x (Sub(64|32|16|8) <t> y z))

// rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce
// the number of other rewrite rules needed for const shifts
(Lsh64x32 <t> x (Const32 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh64x16 <t> x (Const16 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh64x8 <t> x (Const8 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh64x32 <t> x (Const32 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh64x16 <t> x (Const16 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh64x8 <t> x (Const8 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh64Ux32 <t> x (Const32 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh64Ux16 <t> x (Const16 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh64Ux8 <t> x (Const8 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))

(Lsh32x32 <t> x (Const32 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh32x16 <t> x (Const16 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh32x8 <t> x (Const8 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh32x32 <t> x (Const32 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh32x16 <t> x (Const16 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh32x8 <t> x (Const8 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh32Ux32 <t> x (Const32 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh32Ux16 <t> x (Const16 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh32Ux8 <t> x (Const8 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))

(Lsh16x32 <t> x (Const32 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh16x16 <t> x (Const16 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh16x8 <t> x (Const8 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh16x32 <t> x (Const32 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh16x16 <t> x (Const16 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh16x8 <t> x (Const8 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh16Ux32 <t> x (Const32 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh16Ux16 <t> x (Const16 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh16Ux8 <t> x (Const8 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))

(Lsh8x32 <t> x (Const32 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
(Lsh8x16 <t> x (Const16 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
(Lsh8x8 <t> x (Const8 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh8x32 <t> x (Const32 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
(Rsh8x16 <t> x (Const16 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
(Rsh8x8 <t> x (Const8 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
(Rsh8Ux32 <t> x (Const32 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
(Rsh8Ux16 <t> x (Const16 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
(Rsh8Ux8 <t> x (Const8 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))

// shifts by zero
(Lsh(64|32|16|8)x64 x (Const64 [0])) => x
(Rsh(64|32|16|8)x64 x (Const64 [0])) => x
(Rsh(64|32|16|8)Ux64 x (Const64 [0])) => x

// rotates by multiples of register width
(RotateLeft64 x (Const64 [c])) && c%64 == 0 => x
(RotateLeft32 x (Const32 [c])) && c%32 == 0 => x
(RotateLeft16 x (Const16 [c])) && c%16 == 0 => x
(RotateLeft8 x (Const8 [c])) && c%8 == 0 => x

// zero shifted
(Lsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
(Rsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
(Rsh64Ux(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
(Lsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
(Rsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
(Rsh32Ux(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
(Lsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
(Rsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
(Rsh16Ux(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
(Lsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
(Rsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
(Rsh8Ux(64|32|16|8) (Const8 [0]) _) => (Const8 [0])

// large left shifts of all values, and right shifts of unsigned values
((Lsh64|Rsh64U)x64 _ (Const64 [c])) && uint64(c) >= 64 => (Const64 [0])
((Lsh32|Rsh32U)x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
((Lsh16|Rsh16U)x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
((Lsh8|Rsh8U)x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])

// combine const shifts
(Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh64x64 x (Const64 <t> [c+d]))
(Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh32x64 x (Const64 <t> [c+d]))
(Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh16x64 x (Const64 <t> [c+d]))
(Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh8x64 x (Const64 <t> [c+d]))

(Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64x64 x (Const64 <t> [c+d]))
(Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32x64 x (Const64 <t> [c+d]))
(Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16x64 x (Const64 <t> [c+d]))
(Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8x64 x (Const64 <t> [c+d]))

(Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64Ux64 x (Const64 <t> [c+d]))
(Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32Ux64 x (Const64 <t> [c+d]))
(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16Ux64 x (Const64 <t> [c+d]))
(Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8Ux64 x (Const64 <t> [c+d]))

// Remove signed right shift before an unsigned right shift that extracts the sign bit.
(Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] )) => (Rsh8Ux64 x (Const64 <t> [7] ))
(Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15])) => (Rsh16Ux64 x (Const64 <t> [15]))
(Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31])) => (Rsh32Ux64 x (Const64 <t> [31]))
(Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63])) => (Rsh64Ux64 x (Const64 <t> [63]))

// Convert x>>c<<c to x&^(1<<c-1)
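// For example, with c=3 on a 64-bit value, x>>3<<3 clears the low three bits:
// x & (int64(-1) << 3), i.e. x &^ 7.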
(Lsh64x64 i:(Rsh(64|64U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 <v.Type> [int64(-1) << c]))
(Lsh32x64 i:(Rsh(32|32U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 <v.Type> [int32(-1) << c]))
(Lsh16x64 i:(Rsh(16|16U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 <v.Type> [int16(-1) << c]))
(Lsh8x64 i:(Rsh(8|8U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 <v.Type> [int8(-1) << c]))
// similarly for x<<c>>c
(Rsh64Ux64 i:(Lsh64x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 <v.Type> [int64(^uint64(0)>>c)]))
(Rsh32Ux64 i:(Lsh32x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 <v.Type> [int32(^uint32(0)>>c)]))
(Rsh16Ux64 i:(Lsh16x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 <v.Type> [int16(^uint16(0)>>c)]))
(Rsh8Ux64 i:(Lsh8x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 <v.Type> [int8 (^uint8 (0)>>c)]))

// ((x >> c1) << c2) >> c3
(Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
	&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
	=> (Rsh(64|32|16|8)Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))

// ((x << c1) >> c2) << c3
(Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
	&& uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
	=> (Lsh(64|32|16|8)x64 x (Const64 <typ.UInt64> [c1-c2+c3]))

// (x >> c) & uppermask = 0
(And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) && c >= int64(64-ntz64(m)) => (Const64 [0])
(And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) && c >= int64(32-ntz32(m)) => (Const32 [0])
(And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) && c >= int64(16-ntz16(m)) => (Const16 [0])
(And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) && c >= int64(8-ntz8(m)) => (Const8 [0])

// (x << c) & lowermask = 0
(And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) && c >= int64(64-nlz64(m)) => (Const64 [0])
(And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) && c >= int64(32-nlz32(m)) => (Const32 [0])
(And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) && c >= int64(16-nlz16(m)) => (Const16 [0])
(And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) && c >= int64(8-nlz8(m)) => (Const8 [0])

// replace shifts with zero extensions
(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))

// replace shifts with sign extensions
(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (SignExt8to16 (Trunc16to8 <typ.Int8> x))
(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (SignExt8to32 (Trunc32to8 <typ.Int8> x))
(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (SignExt8to64 (Trunc64to8 <typ.Int8> x))
(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (SignExt16to32 (Trunc32to16 <typ.Int16> x))
(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (SignExt16to64 (Trunc64to16 <typ.Int16> x))
(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (SignExt32to64 (Trunc64to32 <typ.Int32> x))

// ((x >> c) & d) << e
(Lsh64x64 (And64 (Rsh(64|64U)x64 <t> x (Const64 <t2> [c])) (Const64 [d])) (Const64 [e])) && c >= e => (And64 (Rsh(64|64U)x64 <t> x (Const64 <t2> [c-e])) (Const64 <t> [d<<e]))
(Lsh32x64 (And32 (Rsh(32|32U)x64 <t> x (Const64 <t2> [c])) (Const32 [d])) (Const64 [e])) && c >= e => (And32 (Rsh(32|32U)x64 <t> x (Const64 <t2> [c-e])) (Const32 <t> [d<<e]))
(Lsh16x64 (And16 (Rsh(16|16U)x64 <t> x (Const64 <t2> [c])) (Const16 [d])) (Const64 [e])) && c >= e => (And16 (Rsh(16|16U)x64 <t> x (Const64 <t2> [c-e])) (Const16 <t> [d<<e]))
(Lsh8x64 (And8 (Rsh(8|8U)x64 <t> x (Const64 <t2> [c])) (Const8 [d])) (Const64 [e])) && c >= e => (And8 (Rsh(8|8U)x64 <t> x (Const64 <t2> [c-e])) (Const8 <t> [d<<e]))
(Lsh64x64 (And64 (Rsh(64|64U)x64 <t> x (Const64 <t2> [c])) (Const64 [d])) (Const64 [e])) && c < e => (And64 (Lsh64x64 <t> x (Const64 <t2> [e-c])) (Const64 <t> [d<<e]))
(Lsh32x64 (And32 (Rsh(32|32U)x64 <t> x (Const64 <t2> [c])) (Const32 [d])) (Const64 [e])) && c < e => (And32 (Lsh32x64 <t> x (Const64 <t2> [e-c])) (Const32 <t> [d<<e]))
(Lsh16x64 (And16 (Rsh(16|16U)x64 <t> x (Const64 <t2> [c])) (Const16 [d])) (Const64 [e])) && c < e => (And16 (Lsh16x64 <t> x (Const64 <t2> [e-c])) (Const16 <t> [d<<e]))
(Lsh8x64 (And8 (Rsh(8|8U)x64 <t> x (Const64 <t2> [c])) (Const8 [d])) (Const64 [e])) && c < e => (And8 (Lsh8x64 <t> x (Const64 <t2> [e-c])) (Const8 <t> [d<<e]))

// constant comparisons
(Eq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c == d])
(Neq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c != d])
(Less(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c < d])
(Leq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c <= d])

(Less64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) < uint64(d)])
(Less32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) < uint32(d)])
(Less16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) < uint16(d)])
(Less8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) < uint8(d)])

(Leq64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) <= uint64(d)])
(Leq32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) <= uint32(d)])
(Leq16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) <= uint16(d)])
(Leq8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) <= uint8(d)])

(Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) && c >= 0 => (ConstBool [true])
(Leq16 (Const16 [0]) (And16 _ (Const16 [c]))) && c >= 0 => (ConstBool [true])
(Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) && c >= 0 => (ConstBool [true])
(Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) && c >= 0 => (ConstBool [true])

(Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
(Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
(Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
(Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])

// prefer equalities with zero
(Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x) && isNonNegative(x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) && isNonNegative(x) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Less(64|32|16|8)U x (Const(64|32|16|8) <t> [1])) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Leq(64|32|16|8)U (Const(64|32|16|8) <t> [1]) x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)

// prefer comparisons with zero
(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) => (Leq(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
(Leq(64|32|16|8) x (Const(64|32|16|8) <t> [-1])) => (Less(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
(Leq(64|32|16|8) (Const(64|32|16|8) <t> [1]) x) => (Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
(Less(64|32|16|8) (Const(64|32|16|8) <t> [-1]) x) => (Leq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)

// constant floating point comparisons
(Eq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c == d])
(Eq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c == d])
(Neq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c != d])
(Neq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c != d])
(Less32F (Const32F [c]) (Const32F [d])) => (ConstBool [c < d])
(Less64F (Const64F [c]) (Const64F [d])) => (ConstBool [c < d])
(Leq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c <= d])
(Leq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c <= d])

// simplifications
(Or(64|32|16|8) x x) => x
(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1])
(Or(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])

(And(64|32|16|8) x x) => x
(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x
(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
(And(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [0])

(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0])
(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Xor(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])

(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0])
(Mul(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
(Mul(64|32)uover <t> (Const(64|32) [0]) x) => (MakeTuple (Const(64|32) <t.FieldType(0)> [0]) (ConstBool <t.FieldType(1)> [false]))

(Com(64|32|16|8) (Com(64|32|16|8) x)) => x
(Com(64|32|16|8) (Const(64|32|16|8) [c])) => (Const(64|32|16|8) [^c])

(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x)
(Add(64|32|16|8) x (Neg(64|32|16|8) y)) => (Sub(64|32|16|8) x y)

(Xor(64|32|16|8) (Const(64|32|16|8) [-1]) x) => (Com(64|32|16|8) x)

(Sub(64|32|16|8) (Neg(64|32|16|8) x) (Com(64|32|16|8) x)) => (Const(64|32|16|8) [1])
(Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1])
(Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])

// Prove does not simplify this because x + y might overflow into the carry;
// however, if no one cares about the carry, let it overflow in a normal add.
(Select0 a:(Add64carry x y (Const64 [0]))) && a.Uses == 1 => (Add64 x y)

// Simplifications involving a common term:
// (t + x) - (t + y) == x - y
// (t + x) - (y + t) == x - y
// (x + t) - (y + t) == x - y
// (x + t) - (t + y) == x - y
// (x - t) + (t + y) == x + y
// (x - t) + (y + t) == x + y
(Sub(64|32|16|8) (Add(64|32|16|8) t x) (Add(64|32|16|8) t y)) => (Sub(64|32|16|8) x y)
(Add(64|32|16|8) (Sub(64|32|16|8) x t) (Add(64|32|16|8) t y)) => (Add(64|32|16|8) x y)

// ^(x-1) == ^x+1 == -x
(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x)

// -(-x) == x
(Neg(64|32|16|8) (Neg(64|32|16|8) x)) => x

// -^x == x+1
(Neg(64|32|16|8) <t> (Com(64|32|16|8) x)) => (Add(64|32|16|8) (Const(64|32|16|8) <t> [1]) x)

(And(64|32|16|8) x (And(64|32|16|8) x y)) => (And(64|32|16|8) x y)
(Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y)
(Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y

// Fold comparisons with numeric bounds
(Less(64|32|16|8)U _ (Const(64|32|16|8) [0])) => (ConstBool [false])
(Leq(64|32|16|8)U (Const(64|32|16|8) [0]) _) => (ConstBool [true])
(Less(64|32|16|8)U (Const(64|32|16|8) [-1]) _) => (ConstBool [false])
(Leq(64|32|16|8)U _ (Const(64|32|16|8) [-1])) => (ConstBool [true])
(Less64 _ (Const64 [math.MinInt64])) => (ConstBool [false])
(Less32 _ (Const32 [math.MinInt32])) => (ConstBool [false])
(Less16 _ (Const16 [math.MinInt16])) => (ConstBool [false])
(Less8 _ (Const8 [math.MinInt8 ])) => (ConstBool [false])
(Leq64 (Const64 [math.MinInt64]) _) => (ConstBool [true])
(Leq32 (Const32 [math.MinInt32]) _) => (ConstBool [true])
(Leq16 (Const16 [math.MinInt16]) _) => (ConstBool [true])
(Leq8 (Const8 [math.MinInt8 ]) _) => (ConstBool [true])
(Less64 (Const64 [math.MaxInt64]) _) => (ConstBool [false])
(Less32 (Const32 [math.MaxInt32]) _) => (ConstBool [false])
(Less16 (Const16 [math.MaxInt16]) _) => (ConstBool [false])
(Less8 (Const8 [math.MaxInt8 ]) _) => (ConstBool [false])
(Leq64 _ (Const64 [math.MaxInt64])) => (ConstBool [true])
(Leq32 _ (Const32 [math.MaxInt32])) => (ConstBool [true])
(Leq16 _ (Const16 [math.MaxInt16])) => (ConstBool [true])
(Leq8 _ (Const8 [math.MaxInt8 ])) => (ConstBool [true])

// Canonicalize <= on numeric bounds and < near numeric bounds to ==
(Leq(64|32|16|8)U x c:(Const(64|32|16|8) [0])) => (Eq(64|32|16|8) x c)
(Leq(64|32|16|8)U c:(Const(64|32|16|8) [-1]) x) => (Eq(64|32|16|8) x c)
(Less(64|32|16|8)U x (Const(64|32|16|8) <t> [1])) => (Eq(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
(Less(64|32|16|8)U (Const(64|32|16|8) <t> [-2]) x) => (Eq(64|32|16|8) x (Const(64|32|16|8) <t> [-1]))
(Leq64 x c:(Const64 [math.MinInt64])) => (Eq64 x c)
(Leq32 x c:(Const32 [math.MinInt32])) => (Eq32 x c)
(Leq16 x c:(Const16 [math.MinInt16])) => (Eq16 x c)
(Leq8 x c:(Const8 [math.MinInt8 ])) => (Eq8 x c)
(Leq64 c:(Const64 [math.MaxInt64]) x) => (Eq64 x c)
(Leq32 c:(Const32 [math.MaxInt32]) x) => (Eq32 x c)
(Leq16 c:(Const16 [math.MaxInt16]) x) => (Eq16 x c)
(Leq8 c:(Const8 [math.MaxInt8 ]) x) => (Eq8 x c)
(Less64 x (Const64 <t> [math.MinInt64+1])) => (Eq64 x (Const64 <t> [math.MinInt64]))
(Less32 x (Const32 <t> [math.MinInt32+1])) => (Eq32 x (Const32 <t> [math.MinInt32]))
(Less16 x (Const16 <t> [math.MinInt16+1])) => (Eq16 x (Const16 <t> [math.MinInt16]))
(Less8 x (Const8 <t> [math.MinInt8 +1])) => (Eq8 x (Const8 <t> [math.MinInt8 ]))
(Less64 (Const64 <t> [math.MaxInt64-1]) x) => (Eq64 x (Const64 <t> [math.MaxInt64]))
(Less32 (Const32 <t> [math.MaxInt32-1]) x) => (Eq32 x (Const32 <t> [math.MaxInt32]))
(Less16 (Const16 <t> [math.MaxInt16-1]) x) => (Eq16 x (Const16 <t> [math.MaxInt16]))
(Less8 (Const8 <t> [math.MaxInt8 -1]) x) => (Eq8 x (Const8 <t> [math.MaxInt8 ]))

// Ands clear bits. Ors set bits.
// If a subsequent Or will set all the bits
// that an And cleared, we can skip the And.
// This happens in bitmasking code like:
// x &^= 3 << shift // clear two old bits
// x |= v << shift // set two new bits
// when shift is a small constant and v ends up a constant 3.
(Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1])) && ^(c1 | c2) == 0 => (Or8 (Const8 <t> [c1]) x)
(Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1])) && ^(c1 | c2) == 0 => (Or16 (Const16 <t> [c1]) x)
(Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1])) && ^(c1 | c2) == 0 => (Or32 (Const32 <t> [c1]) x)
(Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1])) && ^(c1 | c2) == 0 => (Or64 (Const64 <t> [c1]) x)

(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF => (Trunc64to8 x)
(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc64to16 x)
(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF => (Trunc64to32 x)
(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF => (Trunc32to8 x)
(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc32to16 x)
(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF => (Trunc16to8 x)

(ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 56 => x
(ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 48 => x
(ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 32 => x
(ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 24 => x
(ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 16 => x
(ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) && s >= 8 => x

(SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) && s >= 56 => x
(SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) && s >= 48 => x
(SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) && s >= 32 => x
(SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) && s >= 24 => x
(SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) && s >= 16 => x
(SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) && s >= 8 => x

(Slicemask (Const32 [x])) && x > 0 => (Const32 [-1])
(Slicemask (Const32 [0])) => (Const32 [0])
(Slicemask (Const64 [x])) && x > 0 => (Const64 [-1])
(Slicemask (Const64 [0])) => (Const64 [0])

// simplifications often used for lengths, e.g. len(s[i:i+5]) == 5
(Sub(64|32|16|8) (Add(64|32|16|8) x y) x) => y
(Sub(64|32|16|8) (Add(64|32|16|8) x y) y) => x
(Sub(64|32|16|8) (Sub(64|32|16|8) x y) x) => (Neg(64|32|16|8) y)
(Sub(64|32|16|8) x (Add(64|32|16|8) x y)) => (Neg(64|32|16|8) y)
(Add(64|32|16|8) x (Sub(64|32|16|8) y x)) => y
(Add(64|32|16|8) x (Add(64|32|16|8) y (Sub(64|32|16|8) z x))) => (Add(64|32|16|8) y z)

// basic phi simplifications
(Phi (Const8 [c]) (Const8 [c])) => (Const8 [c])
(Phi (Const16 [c]) (Const16 [c])) => (Const16 [c])
(Phi (Const32 [c]) (Const32 [c])) => (Const32 [c])
(Phi (Const64 [c]) (Const64 [c])) => (Const64 [c])

// slice and interface comparisons
// The frontend ensures that we can only compare against nil,
// so we need only compare the first word (interface type or slice ptr).
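// For example, s == nil for a slice s compares only SlicePtr(s) against the
// nil pointer; the len and cap words never need to be inspected.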
(EqInter x y) => (EqPtr (ITab x) (ITab y))
(NeqInter x y) => (NeqPtr (ITab x) (ITab y))
(EqSlice x y) => (EqPtr (SlicePtr x) (SlicePtr y))
(NeqSlice x y) => (NeqPtr (SlicePtr x) (SlicePtr y))

// Load of store of same address, with compatibly typed value and same size
(Load <t1> p1 (Store {t2} p2 x _))
	&& isSamePtr(p1, p2)
	&& copyCompatibleType(t1, x.Type)
	&& t1.Size() == t2.Size()
	=> x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
	&& isSamePtr(p1, p3)
	&& copyCompatibleType(t1, x.Type)
	&& t1.Size() == t3.Size()
	&& disjoint(p3, t3.Size(), p2, t2.Size())
	=> x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
	&& isSamePtr(p1, p4)
	&& copyCompatibleType(t1, x.Type)
	&& t1.Size() == t4.Size()
	&& disjoint(p4, t4.Size(), p2, t2.Size())
	&& disjoint(p4, t4.Size(), p3, t3.Size())
	=> x
(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
	&& isSamePtr(p1, p5)
	&& copyCompatibleType(t1, x.Type)
	&& t1.Size() == t5.Size()
	&& disjoint(p5, t5.Size(), p2, t2.Size())
	&& disjoint(p5, t5.Size(), p3, t3.Size())
	&& disjoint(p5, t5.Size(), p4, t4.Size())
	=> x

// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && t2.Size() == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])

// Float (i.e. hoist) Loads up to Zeros so they can be constant folded.
806(Load <t1> op:(OffPtr [o1] p1)
807 (Store {t2} p2 _
808 mem:(Zero [n] p3 _)))
809 && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
810 && CanSSA(t1)
811 && disjoint(op, t1.Size(), p2, t2.Size())
812 => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
813(Load <t1> op:(OffPtr [o1] p1)
814 (Store {t2} p2 _
815 (Store {t3} p3 _
816 mem:(Zero [n] p4 _))))
817 && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
818 && CanSSA(t1)
819 && disjoint(op, t1.Size(), p2, t2.Size())
820 && disjoint(op, t1.Size(), p3, t3.Size())
821 => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
822(Load <t1> op:(OffPtr [o1] p1)
823 (Store {t2} p2 _
824 (Store {t3} p3 _
825 (Store {t4} p4 _
826 mem:(Zero [n] p5 _)))))
827 && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
828 && CanSSA(t1)
829 && disjoint(op, t1.Size(), p2, t2.Size())
830 && disjoint(op, t1.Size(), p3, t3.Size())
831 && disjoint(op, t1.Size(), p4, t4.Size())
832 => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
833(Load <t1> op:(OffPtr [o1] p1)
834 (Store {t2} p2 _
835 (Store {t3} p3 _
836 (Store {t4} p4 _
837 (Store {t5} p5 _
838 mem:(Zero [n] p6 _))))))
839 && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
840 && CanSSA(t1)
841 && disjoint(op, t1.Size(), p2, t2.Size())
842 && disjoint(op, t1.Size(), p3, t3.Size())
843 && disjoint(op, t1.Size(), p4, t4.Size())
844 && disjoint(op, t1.Size(), p5, t5.Size())
845 => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
846
847// Zero to Load forwarding.
848(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
849 && t1.IsBoolean()
850 && isSamePtr(p1, p2)
851 && n >= o + 1
852 => (ConstBool [false])
853(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
854 && is8BitInt(t1)
855 && isSamePtr(p1, p2)
856 && n >= o + 1
857 => (Const8 [0])
858(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
859 && is16BitInt(t1)
860 && isSamePtr(p1, p2)
861 && n >= o + 2
862 => (Const16 [0])
863(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
864 && is32BitInt(t1)
865 && isSamePtr(p1, p2)
866 && n >= o + 4
867 => (Const32 [0])
868(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
869 && is64BitInt(t1)
870 && isSamePtr(p1, p2)
871 && n >= o + 8
872 => (Const64 [0])
873(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
874 && is32BitFloat(t1)
875 && isSamePtr(p1, p2)
876 && n >= o + 4
877 => (Const32F [0])
878(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
879 && is64BitFloat(t1)
880 && isSamePtr(p1, p2)
881 && n >= o + 8
882 => (Const64F [0])
883
884// Eliminate stores of values that have just been loaded from the same location.
885// We also handle the common case where there are some intermediate stores.
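// For example (illustrative), in
//
// x := *p
// *p = x
//
// the store writes back the value just loaded, so it can be replaced by
// the original memory state.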
886(Store {t1} p1 (Load <t2> p2 mem) mem)
887 && isSamePtr(p1, p2)
888 && t2.Size() == t1.Size()
889 => mem
890(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
891 && isSamePtr(p1, p2)
892 && t2.Size() == t1.Size()
893 && disjoint(p1, t1.Size(), p3, t3.Size())
894 => mem
895(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
896 && isSamePtr(p1, p2)
897 && t2.Size() == t1.Size()
898 && disjoint(p1, t1.Size(), p3, t3.Size())
899 && disjoint(p1, t1.Size(), p4, t4.Size())
900 => mem
901(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
902 && isSamePtr(p1, p2)
903 && t2.Size() == t1.Size()
904 && disjoint(p1, t1.Size(), p3, t3.Size())
905 && disjoint(p1, t1.Size(), p4, t4.Size())
906 && disjoint(p1, t1.Size(), p5, t5.Size())
907 => mem
908
909// Don't Store zeros to cleared variables.
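// For example (illustrative), in
//
// var s struct{ a, b int64 }
// s.b = 0
//
// the explicit store of zero is redundant: s was just cleared by Zero.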
910(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
911 && isConstZero(x)
912 && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
913 => mem
914(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
915 && isConstZero(x)
916 && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3)
917 && disjoint(op, t1.Size(), p2, t2.Size())
918 => mem
919(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
920 && isConstZero(x)
921 && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4)
922 && disjoint(op, t1.Size(), p2, t2.Size())
923 && disjoint(op, t1.Size(), p3, t3.Size())
924 => mem
925(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
926 && isConstZero(x)
927 && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5)
928 && disjoint(op, t1.Size(), p2, t2.Size())
929 && disjoint(op, t1.Size(), p3, t3.Size())
930 && disjoint(op, t1.Size(), p4, t4.Size())
931 => mem
932
933// Collapse OffPtr
934(OffPtr (OffPtr p [y]) [x]) => (OffPtr p [x+y])
935(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p
936
937// indexing operations
938// Note: bounds check has already been done
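// For example (illustrative), &a[i] with 8-byte elements becomes
//
// AddPtr a (Mul i 8)
//
// that is, the base pointer plus the index times the element size.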
939(PtrIndex <t> ptr idx) && config.PtrSize == 4 && is32Bit(t.Elem().Size()) => (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
940(PtrIndex <t> ptr idx) && config.PtrSize == 8 => (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
941
942// struct operations
943(StructSelect [i] x:(StructMake ___)) => x.Args[i]
944(Load <t> _ _) && t.IsStruct() && t.Size() > 0 && CanSSA(t) && !t.IsSIMD() => rewriteStructLoad(v)
945(Store _ (StructMake ___) _) => rewriteStructStore(v)
946
947(StructSelect [i] x:(Load <t> ptr mem)) && !CanSSA(t) =>
948 @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
949
950// Putting struct{*byte} and similar into direct interfaces.
951(IMake _typ (StructMake ___)) => imakeOfStructMake(v)
952(StructSelect (IData x)) && v.Type.Size() > 0 => (IData x)
953(StructSelect (IData x)) && v.Type.Size() == 0 => (Empty)
954
955// un-SSAable values use mem->mem copies
956(Store {t} dst (Load src mem) mem) && !CanSSA(t) =>
957 (Move {t} [t.Size()] dst src mem)
958(Store {t} dst (Load src mem) (VarDef {x} mem)) && !CanSSA(t) =>
959 (Move {t} [t.Size()] dst src (VarDef {x} mem))
960
961// array ops
962(ArraySelect (ArrayMake1 x)) => x
963
964(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && CanSSA(t) =>
965 (ArrayMake1 (Load <t.Elem()> ptr mem))
966
967(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
968
969// Putting [1]*byte and similar into direct interfaces.
970(IMake _typ (ArrayMake1 val)) => (IMake _typ val)
971(ArraySelect [0] (IData x)) => (IData x)
972
973// zero-sized values.
974(Load <t> _ _) && t.Size() == 0 => (Empty)
975(Store _ (Empty) mem) => mem
976
977// string ops
978// Decomposing StringMake and lowering of StringPtr and StringLen
979// happens in a later pass, dec, so that these operations are available
980// to other passes for optimizations.
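// For example (illustrative),
//
// s := "hi"
//
// becomes a StringMake of the address of read-only string data and the
// constant length 2, so len(s) folds to 2.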
981(StringPtr (StringMake (Addr <t> {s} base) _)) => (Addr <t> {s} base)
982(StringLen (StringMake _ (Const64 <t> [c]))) => (Const64 <t> [c])
983(ConstString {str}) && config.PtrSize == 4 && str == "" =>
984 (StringMake (ConstNil) (Const32 <typ.Int> [0]))
985(ConstString {str}) && config.PtrSize == 8 && str == "" =>
986 (StringMake (ConstNil) (Const64 <typ.Int> [0]))
987(ConstString {str}) && config.PtrSize == 4 && str != "" =>
988 (StringMake
989 (Addr <typ.BytePtr> {fe.StringData(str)}
990 (SB))
991 (Const32 <typ.Int> [int32(len(str))]))
992(ConstString {str}) && config.PtrSize == 8 && str != "" =>
993 (StringMake
994 (Addr <typ.BytePtr> {fe.StringData(str)}
995 (SB))
996 (Const64 <typ.Int> [int64(len(str))]))
997
998// slice ops
999// Only a few slice rules are provided here. See dec.rules for
1000// a more comprehensive set.
1001(SliceLen (SliceMake _ (Const64 <t> [c]) _)) => (Const64 <t> [c])
1002(SliceCap (SliceMake _ _ (Const64 <t> [c]))) => (Const64 <t> [c])
1003(SliceLen (SliceMake _ (Const32 <t> [c]) _)) => (Const32 <t> [c])
1004(SliceCap (SliceMake _ _ (Const32 <t> [c]))) => (Const32 <t> [c])
1005(SlicePtr (SliceMake (SlicePtr x) _ _)) => (SlicePtr x)
1006(SliceLen (SliceMake _ (SliceLen x) _)) => (SliceLen x)
1007(SliceCap (SliceMake _ _ (SliceCap x))) => (SliceCap x)
1008(SliceCap (SliceMake _ _ (SliceLen x))) => (SliceLen x)
1009(ConstSlice) && config.PtrSize == 4 =>
1010 (SliceMake
1011 (ConstNil <v.Type.Elem().PtrTo()>)
1012 (Const32 <typ.Int> [0])
1013 (Const32 <typ.Int> [0]))
1014(ConstSlice) && config.PtrSize == 8 =>
1015 (SliceMake
1016 (ConstNil <v.Type.Elem().PtrTo()>)
1017 (Const64 <typ.Int> [0])
1018 (Const64 <typ.Int> [0]))
1019
1020// Special rule to help constant slicing; len > 0 implies cap > 0 implies Slicemask is all 1
1021(SliceMake (AddPtr <t> x (And64 y (Slicemask _))) w:(Const64 [c]) z) && c > 0 => (SliceMake (AddPtr <t> x y) w z)
1022(SliceMake (AddPtr <t> x (And32 y (Slicemask _))) w:(Const32 [c]) z) && c > 0 => (SliceMake (AddPtr <t> x y) w z)
1023
1024// interface ops
1025(ConstInterface) =>
1026 (IMake
1027 (ConstNil <typ.Uintptr>)
1028 (ConstNil <typ.BytePtr>))
1029
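// The current goroutine pointer returned by GetG is never nil, so its
// nil check can be removed.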
1030(NilCheck ptr:(GetG mem) mem) => ptr
1031
1032(If (Not cond) yes no) => (If cond no yes)
1033(If (ConstBool [c]) yes no) && c => (First yes no)
1034(If (ConstBool [c]) yes no) && !c => (First no yes)
1035
1036(Phi <t> nx:(Not x) ny:(Not y)) && nx.Uses == 1 && ny.Uses == 1 => (Not (Phi <t> x y))
1037
1038// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
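// For example (illustrative),
//
// q := unsafe.Pointer(uintptr(p) + off)
//
// produces Convert/Add/Convert, which collapses to a single AddPtr.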
1039(Convert (Add(64|32) (Convert ptr mem) off) mem) => (AddPtr ptr off)
1040(Convert (Convert ptr mem) mem) => ptr
1041// Note: it is important that the target rewrite is ptr+(off1+off2), not (ptr+off1)+off2.
1042// We must ensure that no intermediate computations are invalid pointers.
1043(Convert a:(Add(64|32) (Add(64|32) (Convert ptr mem) off1) off2) mem) => (AddPtr ptr (Add(64|32) <a.Type> off1 off2))
1044
1045// Simplification of divisions.
1046// Only trivial, easily analyzed (by prove) rewrites here.
1047// Strength reduction of div to mul is delayed to divmod.rules.
1048
1049// Signed divide by a negative constant. Rewrite to divide by a positive constant.
1050(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 <t> n (Const8 <t> [-c])))
1051(Div16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Neg16 (Div16 <t> n (Const16 <t> [-c])))
1052(Div32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Neg32 (Div32 <t> n (Const32 <t> [-c])))
1053(Div64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Neg64 (Div64 <t> n (Const64 <t> [-c])))
1054
// Dividing by the most-negative number. The result is always 0, except
// when the input is also the most-negative number, in which case the result is 1.
// We can detect that case using the sign bit of x & -x.
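// For example (illustrative), for int8 and x == -128: x & -x == -128,
// whose sign bit is set, so the unsigned shift right by 7 yields 1; for
// any other x the shift yields 0.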
1058(Div64 x (Const64 [-1<<63])) && isNonNegative(x) => (Const64 [0])
1059(Div8 <t> x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
1060(Div16 <t> x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
1061(Div32 <t> x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
1062(Div64 <t> x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
1063
1064// Unsigned divide by power of 2. Strength reduce to a shift.
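// For example (illustrative), for a uint64 x,
//
// y := x / 8
//
// becomes y := x >> 3.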
1065(Div8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8u(uint8(c))]))
1066(Div16u n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16u(uint16(c))]))
1067(Div32u n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32u(uint32(c))]))
1068(Div64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64u(uint64(c))]))
1069
1070// Strength reduce multiplication by a power of two to a shift.
1071// Excluded from early opt so that prove can recognize mod
1072// by the x - (x/d)*d pattern.
1073// (Runs during "middle opt" and "late opt".)
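// For example (illustrative), for an int64 x,
//
// y := x * 8
//
// becomes y := x << 3, and y := x * -8 becomes y := -(x << 3).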
1074(Mul8 <t> x (Const8 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" =>
1075 (Lsh8x64 <t> x (Const64 <typ.UInt64> [log8(c)]))
1076(Mul16 <t> x (Const16 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" =>
1077 (Lsh16x64 <t> x (Const64 <typ.UInt64> [log16(c)]))
1078(Mul32 <t> x (Const32 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" =>
1079 (Lsh32x64 <t> x (Const64 <typ.UInt64> [log32(c)]))
1080(Mul64 <t> x (Const64 [c])) && isPowerOfTwo(c) && v.Block.Func.pass.name != "opt" =>
1081 (Lsh64x64 <t> x (Const64 <typ.UInt64> [log64(c)]))
1082(Mul8 <t> x (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" =>
1083 (Neg8 (Lsh8x64 <t> x (Const64 <typ.UInt64> [log8(-c)])))
1084(Mul16 <t> x (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" =>
1085 (Neg16 (Lsh16x64 <t> x (Const64 <typ.UInt64> [log16(-c)])))
1086(Mul32 <t> x (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" =>
1087 (Neg32 (Lsh32x64 <t> x (Const64 <typ.UInt64> [log32(-c)])))
1088(Mul64 <t> x (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) && v.Block.Func.pass.name != "opt" =>
1089 (Neg64 (Lsh64x64 <t> x (Const64 <typ.UInt64> [log64(-c)])))
1090
1091// Strength reduction of mod to div.
1092// Strength reduction of div to mul is delayed to genericlateopt.rules.
1093
1094// Unsigned mod by power of 2 constant.
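// For example (illustrative), for a uint32 x,
//
// y := x % 8
//
// becomes y := x & 7.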
1095(Mod8u <t> n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (And8 n (Const8 <t> [c-1]))
1096(Mod16u <t> n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (And16 n (Const16 <t> [c-1]))
1097(Mod32u <t> n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (And32 n (Const32 <t> [c-1]))
1098(Mod64u <t> n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (And64 n (Const64 <t> [c-1]))
1099
1100// Signed non-negative mod by power of 2 constant.
1101// TODO: Replace ModN with ModNu in prove.
1102(Mod8 <t> n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And8 n (Const8 <t> [c-1]))
1103(Mod16 <t> n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And16 n (Const16 <t> [c-1]))
1104(Mod32 <t> n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And32 n (Const32 <t> [c-1]))
1105(Mod64 <t> n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And64 n (Const64 <t> [c-1]))
1106(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) => n
1107
1108// Signed mod by negative constant.
1109(Mod8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Mod8 <t> n (Const8 <t> [-c]))
1110(Mod16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Mod16 <t> n (Const16 <t> [-c]))
1111(Mod32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Mod32 <t> n (Const32 <t> [-c]))
1112(Mod64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Mod64 <t> n (Const64 <t> [-c]))
1113
// For all other mods by constants, use A%B = A-(A/B*B).
1115// This implements % with two * and a bunch of ancillary ops.
1116// One of the * is free if the user's code also computes A/B.
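// For example (illustrative), for an int64 x,
//
// y := x % 7
//
// becomes y := x - (x/7)*7, and the division is strength-reduced later.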
1117(Mod8 <t> x (Const8 [c])) && x.Op != OpConst8 && (c > 0 || c == -1<<7)
1118 => (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
1119(Mod16 <t> x (Const16 [c])) && x.Op != OpConst16 && (c > 0 || c == -1<<15)
1120 => (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
1121(Mod32 <t> x (Const32 [c])) && x.Op != OpConst32 && (c > 0 || c == -1<<31)
1122 => (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
1123(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63)
1124 => (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
1125(Mod8u <t> x (Const8 [c])) && x.Op != OpConst8 && c != 0
1126 => (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
1127(Mod16u <t> x (Const16 [c])) && x.Op != OpConst16 && c != 0
1128 => (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
1129(Mod32u <t> x (Const32 [c])) && x.Op != OpConst32 && c != 0
1130 => (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
1131(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && c != 0
1132 => (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
1133
1134// Set up for mod->mul+rot optimization in genericlateopt.rules.
// For architectures without rotates on types narrower than 32 bits, promote to 32-bit.
1136// TODO: Also != 0 case?
1137(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) =>
1138 (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
1139(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) =>
1140 (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
1141(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) =>
1142 (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
1143(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) =>
1144 (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
1145
1146(Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y)
1147(Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y)
1148
1149// Optimize bitsets
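// For example (illustrative), when the mask has a single bit set,
//
// x&4 == 4
//
// becomes x&4 != 0, which is often cheaper to test.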
1150(Eq(8|16|32|64) (And(8|16|32|64) <t> x (Const(8|16|32|64) <t> [y])) (Const(8|16|32|64) <t> [y])) && oneBit(y)
1151 => (Neq(8|16|32|64) (And(8|16|32|64) <t> x (Const(8|16|32|64) <t> [y])) (Const(8|16|32|64) <t> [0]))
1152(Neq(8|16|32|64) (And(8|16|32|64) <t> x (Const(8|16|32|64) <t> [y])) (Const(8|16|32|64) <t> [y])) && oneBit(y)
1153 => (Eq(8|16|32|64) (And(8|16|32|64) <t> x (Const(8|16|32|64) <t> [y])) (Const(8|16|32|64) <t> [0]))
1154
1155// Mark newly generated bounded shifts as bounded, for opt passes after prove.
1156(Lsh64x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 64 => (Lsh64x(8|16|32|64) [true] x con)
1157(Rsh64x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 64 => (Rsh64x(8|16|32|64) [true] x con)
1158(Rsh64Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 64 => (Rsh64Ux(8|16|32|64) [true] x con)
1159(Lsh32x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 32 => (Lsh32x(8|16|32|64) [true] x con)
1160(Rsh32x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 32 => (Rsh32x(8|16|32|64) [true] x con)
1161(Rsh32Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 32 => (Rsh32Ux(8|16|32|64) [true] x con)
1162(Lsh16x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 16 => (Lsh16x(8|16|32|64) [true] x con)
1163(Rsh16x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 16 => (Rsh16x(8|16|32|64) [true] x con)
1164(Rsh16Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 16 => (Rsh16Ux(8|16|32|64) [true] x con)
1165(Lsh8x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 8 => (Lsh8x(8|16|32|64) [true] x con)
1166(Rsh8x(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 8 => (Rsh8x(8|16|32|64) [true] x con)
1167(Rsh8Ux(8|16|32|64) [false] x con:(Const(8|16|32|64) [c])) && 0 < c && c < 8 => (Rsh8Ux(8|16|32|64) [true] x con)
1168
// Reassociate expressions involving constants so that constants come
// first: rewrite (op (op y C) x) to (op C (op x y)) or similar, where C
// is a constant. This pushes constants to the outside of the expression,
// where any constant-folding opportunities become obvious.
1176// Note: don't include AddPtr here! In order to maintain the
1177// invariant that pointers must stay within the pointed-to object,
1178// we can't pull part of a pointer computation above the AddPtr.
1179// See issue 37881.
1180// Note: we don't need to handle any (x-C) cases because we already rewrite
1181// (x-C) to (x+(-C)).
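// For example (illustrative),
//
// z := (x + 3) + y
//
// is reassociated to 3 + (x + y); if y is itself of the form w + 5, the
// constants 3 and 5 then meet and fold to 8.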
1182
1183// x + (C + z) -> C + (x + z)
1184(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x))
1185(Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 <t> z x))
1186(Add16 (Add16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 <t> z x))
1187(Add8 (Add8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Add8 <t> z x))
1188
1189// x + (C - z) -> C + (x - z)
1190(Add64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> x z))
1191(Add32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> x z))
1192(Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z))
1193(Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z))
1194
1195// x - (C - z) -> x + (z - C) -> (x + z) - C
1196(Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i)
1197(Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i)
1198(Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i)
1199(Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i)
1200
1201// x - (z + C) -> x + (-z - C) -> (x - z) - C
1202(Sub64 x (Add64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 <t> x z) i)
1203(Sub32 x (Add32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 <t> x z) i)
1204(Sub16 x (Add16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 <t> x z) i)
1205(Sub8 x (Add8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 <t> x z) i)
1206
1207// (C - z) - x -> C - (z + x)
1208(Sub64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 <t> z x))
1209(Sub32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 <t> z x))
1210(Sub16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 <t> z x))
1211(Sub8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 <t> z x))
1212
// (z + C) - x -> C + (z - x)
1214(Sub64 (Add64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> z x))
1215(Sub32 (Add32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> z x))
1216(Sub16 (Add16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> z x))
1217(Sub8 (Add8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> z x))
1218
1219// x & (C & z) -> C & (x & z)
1220(And64 (And64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (And64 i (And64 <t> z x))
1221(And32 (And32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (And32 i (And32 <t> z x))
1222(And16 (And16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (And16 i (And16 <t> z x))
1223(And8 (And8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (And8 i (And8 <t> z x))
1224
1225// x | (C | z) -> C | (x | z)
1226(Or64 (Or64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Or64 i (Or64 <t> z x))
1227(Or32 (Or32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Or32 i (Or32 <t> z x))
1228(Or16 (Or16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Or16 i (Or16 <t> z x))
1229(Or8 (Or8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Or8 i (Or8 <t> z x))
1230
1231// x ^ (C ^ z) -> C ^ (x ^ z)
1232(Xor64 (Xor64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Xor64 i (Xor64 <t> z x))
1233(Xor32 (Xor32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Xor32 i (Xor32 <t> z x))
1234(Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x))
1235(Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x))
1236
// x * (C * z) -> C * (x * z)
1238(Mul64 (Mul64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 <t> x z))
1239(Mul32 (Mul32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 <t> x z))
1240(Mul16 (Mul16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 <t> x z))
1241(Mul8 (Mul8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 <t> x z))
1242
1243// C + (D + x) -> (C + D) + x
1244(Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c+d]) x)
1245(Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c+d]) x)
1246(Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c+d]) x)
1247(Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c+d]) x)
1248
1249// C + (D - x) -> (C + D) - x
1250(Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c+d]) x)
1251(Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c+d]) x)
1252(Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x)
1253(Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x)
1254
1255// C - (D - x) -> (C - D) + x
1256(Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x)
1257(Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x)
1258(Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x)
1259(Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x)
1260
1261// C - (D + x) -> (C - D) - x
1262(Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c-d]) x)
1263(Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c-d]) x)
1264(Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c-d]) x)
1265(Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c-d]) x)
1266
1267// C & (D & x) -> (C & D) & x
1268(And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x)
1269(And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x)
1270(And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x)) => (And16 (Const16 <t> [c&d]) x)
1271(And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x)) => (And8 (Const8 <t> [c&d]) x)
1272
1273// C | (D | x) -> (C | D) | x
1274(Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x)) => (Or64 (Const64 <t> [c|d]) x)
1275(Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x)) => (Or32 (Const32 <t> [c|d]) x)
1276(Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x)) => (Or16 (Const16 <t> [c|d]) x)
1277(Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x)) => (Or8 (Const8 <t> [c|d]) x)
1278
1279// C ^ (D ^ x) -> (C ^ D) ^ x
1280(Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x)) => (Xor64 (Const64 <t> [c^d]) x)
1281(Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x)) => (Xor32 (Const32 <t> [c^d]) x)
1282(Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x)) => (Xor16 (Const16 <t> [c^d]) x)
1283(Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x)) => (Xor8 (Const8 <t> [c^d]) x)
1284
// C * (D * x) -> (C * D) * x
1286(Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x)) => (Mul64 (Const64 <t> [c*d]) x)
1287(Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x)) => (Mul32 (Const32 <t> [c*d]) x)
1288(Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x)) => (Mul16 (Const16 <t> [c*d]) x)
1289(Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) => (Mul8 (Const8 <t> [c*d]) x)
1290
1291// floating point optimizations
1292(Mul(32|64)F x (Const(32|64)F [1])) => x
1293(Mul32F x (Const32F [-1])) => (Neg32F x)
1294(Mul64F x (Const64F [-1])) => (Neg64F x)
1295(Mul32F x (Const32F [2])) => (Add32F x x)
1296(Mul64F x (Const64F [2])) => (Add64F x x)
1297
1298(Div32F x (Const32F <t> [c])) && reciprocalExact32(c) => (Mul32F x (Const32F <t> [1/c]))
1299(Div64F x (Const64F <t> [c])) && reciprocalExact64(c) => (Mul64F x (Const64F <t> [1/c]))
1300
// rewrite the single-precision sqrt expression "float32(math.Sqrt(float64(x)))" into a direct 32-bit sqrt
1302(Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) && sqrt0.Uses==1 => (Sqrt32 x)
1303
1304(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)])
1305
// for rewriting constant-folded math/bits ops
1307(Select0 (MakeTuple x y)) => x
1308(Select1 (MakeTuple x y)) => y
1309
1310// for rewriting results of some late-expanded rewrites (below)
1311(SelectN [n] m:(MakeResult ___)) => m.Args[n]
1312
// TODO(matloob): Try out having non-zeroing mallocs for pointerless
1314// memory, and leaving the zeroing here. Then the compiler can remove
1315// the zeroing if the user has explicit writes to the whole object.
1316
1317// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
1318(Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call))
1319 && isMalloc(call.Aux)
1320 => mem
1321
1322(Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call))
1323 && isConstZero(x)
1324 && isMalloc(call.Aux)
1325 => mem
1326
1327(Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call))
1328 && isConstZero(x)
1329 && isMalloc(call.Aux)
1330 => mem
1331
1332(NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _)
1333 && isMalloc(call.Aux)
1334 && warnRule(fe.Debug_checknil(), v, "removed nil check")
1335 => ptr
1336
1337(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _)
1338 && isMalloc(call.Aux)
1339 && warnRule(fe.Debug_checknil(), v, "removed nil check")
1340 => ptr
1341
1342// Addresses of globals are always non-nil.
1343(NilCheck ptr:(Addr {_} (SB)) _) => ptr
1344(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr
1345
1346// Addresses of locals are always non-nil.
1347(NilCheck ptr:(LocalAddr _ _) _)
1348 && warnRule(fe.Debug_checknil(), v, "removed nil check")
1349 => ptr
1350
1351// .dict args are always non-nil.
1352(NilCheck ptr:(Arg {sym}) _) && isDictArgSym(sym) => ptr
1353
1354// Nil checks of nil checks are redundant.
1355// See comment at the end of https://go-review.googlesource.com/c/go/+/537775.
1356(NilCheck ptr:(NilCheck _ _) _ ) => ptr
1357
// for late-expanded calls, recognize memequal applied to a small read-only constant operand
// Support is limited to sizes of 1 through 8 bytes
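// For example (illustrative), a comparison like
//
// string(b) == "ab"
//
// against a 2-byte read-only constant becomes a single 16-bit load of
// b's bytes compared with a 16-bit constant, instead of a call.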
1360(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
1361 && isSameCall(callAux, "runtime.memequal")
1362 && symIsRO(scon)
1363 => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
1364
1365(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [1]) mem)
1366 && isSameCall(callAux, "runtime.memequal")
1367 && symIsRO(scon)
1368 => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
1369
1370(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem)
1371 && isSameCall(callAux, "runtime.memequal")
1372 && symIsRO(scon)
1373 && canLoadUnaligned(config)
1374 => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
1375
1376(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [2]) mem)
1377 && isSameCall(callAux, "runtime.memequal")
1378 && symIsRO(scon)
1379 && canLoadUnaligned(config)
1380 => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
1381
1382(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem)
1383 && isSameCall(callAux, "runtime.memequal")
1384 && symIsRO(scon)
1385 && canLoadUnaligned(config)
1386 => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
1387
1388(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [4]) mem)
1389 && isSameCall(callAux, "runtime.memequal")
1390 && symIsRO(scon)
1391 && canLoadUnaligned(config)
1392 => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
1393
1394(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem)
1395 && isSameCall(callAux, "runtime.memequal")
1396 && symIsRO(scon)
1397 && canLoadUnaligned(config) && config.PtrSize == 8
1398 => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
1399
1400(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [8]) mem)
1401 && isSameCall(callAux, "runtime.memequal")
1402 && symIsRO(scon)
1403 && canLoadUnaligned(config) && config.PtrSize == 8
1404 => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
1405
1406(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [3]) mem)
1407 && isSameCall(callAux, "runtime.memequal")
1408 && symIsRO(scon)
1409 && canLoadUnaligned(config) =>
1410 (MakeResult
1411 (Eq32
1412 (Or32 <typ.Int32>
1413 (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
1414 (Lsh32x32 <typ.Int32>
1415 (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
1416 (Const32 <typ.Int32> [16])))
1417 (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
1418 mem)
1419
1420(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [3]) mem)
1421 && isSameCall(callAux, "runtime.memequal")
1422 && symIsRO(scon)
1423 && canLoadUnaligned(config) =>
1424 (MakeResult
1425 (Eq32
1426 (Or32 <typ.Int32>
1427 (ZeroExt16to32 <typ.Int32> (Load <typ.Int16> sptr mem))
1428 (Lsh32x32 <typ.Int32>
1429 (ZeroExt8to32 <typ.Int32> (Load <typ.Int8> (OffPtr <typ.BytePtr> [2] sptr) mem))
1430 (Const32 <typ.Int32> [16])))
1431 (Const32 <typ.Int32> [int32(uint32(read16(scon,0,config.ctxt.Arch.ByteOrder))|(uint32(read8(scon,2))<<16))]))
1432 mem)
1433
1434(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [5]) mem)
1435 && isSameCall(callAux, "runtime.memequal")
1436 && symIsRO(scon)
1437 && canLoadUnaligned(config) && config.PtrSize == 8 =>
1438 (MakeResult
1439 (Eq64
1440 (Or64 <typ.Int64>
1441 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
1442 (Lsh64x64 <typ.Int64>
1443 (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
1444 (Const64 <typ.Int64> [32])))
1445 (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
1446 mem)
1447
1448(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [5]) mem)
1449 && isSameCall(callAux, "runtime.memequal")
1450 && symIsRO(scon)
1451 && canLoadUnaligned(config) && config.PtrSize == 8 =>
1452 (MakeResult
1453 (Eq64
1454 (Or64 <typ.Int64>
1455 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
1456 (Lsh64x64 <typ.Int64>
1457 (ZeroExt8to64 <typ.Int64> (Load <typ.Int8> (OffPtr <typ.BytePtr> [4] sptr) mem))
1458 (Const64 <typ.Int64> [32])))
1459 (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read8(scon,4))<<32))]))
1460 mem)
1461
1462(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [6]) mem)
1463 && isSameCall(callAux, "runtime.memequal")
1464 && symIsRO(scon)
1465 && canLoadUnaligned(config) && config.PtrSize == 8 =>
1466 (MakeResult
1467 (Eq64
1468 (Or64 <typ.Int64>
1469 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
1470 (Lsh64x64 <typ.Int64>
1471 (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
1472 (Const64 <typ.Int64> [32])))
1473 (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
1474 mem)
1475
1476(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [6]) mem)
1477 && isSameCall(callAux, "runtime.memequal")
1478 && symIsRO(scon)
1479 && canLoadUnaligned(config) && config.PtrSize == 8 =>
1480 (MakeResult
1481 (Eq64
1482 (Or64 <typ.Int64>
1483 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
1484 (Lsh64x64 <typ.Int64>
1485 (ZeroExt16to64 <typ.Int64> (Load <typ.Int16> (OffPtr <typ.BytePtr> [4] sptr) mem))
1486 (Const64 <typ.Int64> [32])))
1487 (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read16(scon,4,config.ctxt.Arch.ByteOrder))<<32))]))
1488 mem)
1489
1490(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [7]) mem)
1491 && isSameCall(callAux, "runtime.memequal")
1492 && symIsRO(scon)
1493 && canLoadUnaligned(config) && config.PtrSize == 8 =>
1494 (MakeResult
1495 (Eq64
1496 (Or64 <typ.Int64>
1497 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
1498 (Lsh64x64 <typ.Int64>
1499 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
1500 (Const64 <typ.Int64> [32])))
1501 (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
1502 mem)
1503
1504(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [7]) mem)
1505 && isSameCall(callAux, "runtime.memequal")
1506 && symIsRO(scon)
1507 && canLoadUnaligned(config) && config.PtrSize == 8 =>
1508 (MakeResult
1509 (Eq64
1510 (Or64 <typ.Int64>
1511 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> sptr mem))
1512 (Lsh64x64 <typ.Int64>
1513 (ZeroExt32to64 <typ.Int64> (Load <typ.Int32> (OffPtr <typ.BytePtr> [3] sptr) mem))
1514 (Const64 <typ.Int64> [32])))
1515 (Const64 <typ.Int64> [int64(uint64(read32(scon,0,config.ctxt.Arch.ByteOrder))|(uint64(read32(scon,3,config.ctxt.Arch.ByteOrder))<<32))]))
1516 mem)
1517
1518(StaticLECall {callAux} _ _ (Const64 [0]) mem)
1519 && isSameCall(callAux, "runtime.memequal")
1520 => (MakeResult (ConstBool <typ.Bool> [true]) mem)
1521
1522(Static(Call|LECall) {callAux} p q _ mem)
1523 && isSameCall(callAux, "runtime.memequal")
1524 && isSamePtr(p, q)
1525 => (MakeResult (ConstBool <typ.Bool> [true]) mem)
1526
1527(MemEq sptr tptr (Const64 [1]) mem)
1528 => (Eq8 (Load <typ.Int8> sptr mem) (Load <typ.Int8> tptr mem))
1529
1530(Load <typ.Int8> sptr:(Addr {scon} (SB)) mem)
1531 && symIsRO(scon)
1532 => (Const8 <typ.Int8> [int8(read8(scon,0))])
1533
1534(MemEq sptr tptr (Const64 [2]) mem)
1535 && canLoadUnaligned(config)
1536 => (Eq16 (Load <typ.Int16> sptr mem) (Load <typ.Int16> tptr mem))
1537
1538(Load <typ.Int16> sptr:(Addr {scon} (SB)) mem)
1539 && symIsRO(scon)
1540 => (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])
1541
1542(MemEq sptr tptr (Const64 [4]) mem)
1543 && canLoadUnaligned(config)
1544 => (Eq32 (Load <typ.Int32> sptr mem) (Load <typ.Int32> tptr mem))
1545
1546(Load <typ.Int32> sptr:(Addr {scon} (SB)) mem)
1547 && symIsRO(scon)
1548 => (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])
1549
1550(MemEq sptr tptr (Const64 [8]) mem)
1551 && canLoadUnaligned(config) && config.PtrSize == 8
1552 => (Eq64 (Load <typ.Int64> sptr mem) (Load <typ.Int64> tptr mem))
1553
1554(Load <typ.Int64> sptr:(Addr {scon} (SB)) mem)
1555 && symIsRO(scon)
1556 => (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])
1557
1558(MemEq _ _ (Const64 [0]) _) => (ConstBool <typ.Bool> [true])
1559
1560(MemEq p q _ _) && isSamePtr(p, q) => (ConstBool <typ.Bool> [true])
1561
1562// Turn known-size calls to memclrNoHeapPointers into a Zero.
1563// Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details.
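// For example (illustrative), a memclrNoHeapPointers call whose length
// is a small known constant (as clear can generate for a pointer-free
// slice) becomes an inline Zero of that many bytes instead of a call.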
1564(SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem))
1565 && isInlinableMemclr(config, int64(c))
1566 && isSameCall(sym, "runtime.memclrNoHeapPointers")
1567 && call.Uses == 1
1568 && clobber(call)
1569 => (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem)
1570
// Recognize make([]T, 0) and replace it with a pointer to the zerobase
1572(StaticLECall {callAux} _ (Const(64|32) [0]) (Const(64|32) [0]) mem)
1573 && isSameCall(callAux, "runtime.makeslice")
1574 => (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem)
1575
1576// Evaluate constant address comparisons.
1577(EqPtr x x) => (ConstBool [true])
1578(NeqPtr x x) => (ConstBool [false])
1579(EqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x == y])
1580(EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x == y && o == 0])
1581(EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x == y && o1 == o2])
1582(NeqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x != y])
1583(NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x != y || o != 0])
1584(NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x != y || o1 != o2])
1585(EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x == y])
1586(EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x == y && o == 0])
1587(EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x == y && o1 == o2])
1588(NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x != y])
1589(NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x != y || o != 0])
1590(NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x != y || o1 != o2])
1591(EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0])
1592(NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0])
1593(EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2])
1594(NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 != o2])
1595(EqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c == d])
1596(NeqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c != d])
1597(EqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x==y])
1598(NeqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x!=y])
1599
1600(EqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [false])
1601(EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [false])
1602(EqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [false])
1603(EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [false])
1604(NeqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [true])
1605(NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [true])
1606(NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [true])
1607(NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [true])
1608
1609// Simplify address comparisons.
1610(EqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (Not (IsNonNil o1))
1611(NeqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (IsNonNil o1)
1612(EqPtr (Const(32|64) [0]) p) => (Not (IsNonNil p))
1613(NeqPtr (Const(32|64) [0]) p) => (IsNonNil p)
1614(EqPtr (ConstNil) p) => (Not (IsNonNil p))
1615(NeqPtr (ConstNil) p) => (IsNonNil p)
1616
1617// Evaluate constant user nil checks.
1618(IsNonNil (ConstNil)) => (ConstBool [false])
1619(IsNonNil (Const(32|64) [c])) => (ConstBool [c != 0])
1620(IsNonNil (Addr _) ) => (ConstBool [true])
1621(IsNonNil (Convert (Addr _) _)) => (ConstBool [true])
1622(IsNonNil (LocalAddr _ _)) => (ConstBool [true])
1623
1624// Inline small or disjoint runtime.memmove calls with constant length.
1625// See the comment in op Move in genericOps.go for discussion of the type.
1626//
1627// Note that we've lost any knowledge of the type and alignment requirements
1628// of the source and destination. We only know the size, and that the type
1629// contains no pointers.
1630// The type of the move is not necessarily v.Args[0].Type().Elem()!
1631// See issue 55122 for details.
1632//
// Because expand calls runs after prove, constants useful to this pattern may not appear.
// Both versions need to exist: the memory variant and the register variant.
1635//
1636// Match post-expansion calls, memory version.
1637(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
1638 && sz >= 0
1639 && isSameCall(sym, "runtime.memmove")
1640 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
1641 && isInlinableMemmove(dst, src, int64(sz), config)
1642 && clobber(s1, s2, s3, call)
1643 => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
1644
1645// Match post-expansion calls, register version.
1646(SelectN [0] call:(StaticCall {sym} dst src (Const(64|32) [sz]) mem))
1647 && sz >= 0
1648 && call.Uses == 1 // this will exclude all calls with results
1649 && isSameCall(sym, "runtime.memmove")
1650 && isInlinableMemmove(dst, src, int64(sz), config)
1651 && clobber(call)
1652 => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
1653
1654// Match pre-expansion calls.
1655(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
1656 && sz >= 0
1657 && call.Uses == 1 // this will exclude all calls with results
1658 && isSameCall(sym, "runtime.memmove")
1659 && isInlinableMemmove(dst, src, int64(sz), config)
1660 && clobber(call)
1661 => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
1662
1663// De-virtualize late-expanded interface calls into late-expanded static calls.
1664(InterLECall [argsize] {auxCall} (Addr {fn} (SB)) ___) => devirtLECall(v, fn.(*obj.LSym))
1665
1666// Move and Zero optimizations.
1667// Move source and destination may overlap.
1668
1669// Convert Moves into Zeros when the source is known to be zeros.
1670(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2)
1671 => (Zero {t} [n] dst1 mem)
1672(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0)
1673 => (Zero {t} [n] dst1 mem)
1674(Move {t} [n] dst (Addr {sym} (SB)) mem) && symIsROZero(sym) => (Zero {t} [n] dst mem)
1675
1676// Don't Store to variables that are about to be overwritten by Move/Zero.
1677(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
1678 && isSamePtr(p1, p2) && store.Uses == 1
1679 && n >= o2 + t2.Size()
1680 && clobber(store)
1681 => (Zero {t1} [n] p1 mem)
1682(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
1683 && isSamePtr(dst1, dst2) && store.Uses == 1
1684 && n >= o2 + t2.Size()
1685 && disjoint(src1, n, op, t2.Size())
1686 && clobber(store)
1687 => (Move {t1} [n] dst1 src1 mem)
1688
1689// Don't Move to variables that are immediately completely overwritten.
1690(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
1691 && move.Uses == 1
1692 && isSamePtr(dst1, dst2)
1693 && clobber(move)
1694 => (Zero {t} [n] dst1 mem)
1695(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
1696 && move.Uses == 1
1697 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
1698 && clobber(move)
1699 => (Move {t} [n] dst1 src1 mem)
1700(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
1701 && move.Uses == 1 && vardef.Uses == 1
1702 && isSamePtr(dst1, dst2)
1703 && clobber(move, vardef)
1704 => (Zero {t} [n] dst1 (VarDef {x} mem))
1705(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
1706 && move.Uses == 1 && vardef.Uses == 1
1707 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
1708 && clobber(move, vardef)
1709 => (Move {t} [n] dst1 src1 (VarDef {x} mem))
1710(Store {t1} op1:(OffPtr [o1] p1) d1
1711 m2:(Store {t2} op2:(OffPtr [0] p2) d2
1712 m3:(Move [n] p3 _ mem)))
1713 && m2.Uses == 1 && m3.Uses == 1
1714 && o1 == t2.Size()
1715 && n == t2.Size() + t1.Size()
1716 && isSamePtr(p1, p2) && isSamePtr(p2, p3)
1717 && clobber(m2, m3)
1718 => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
1719(Store {t1} op1:(OffPtr [o1] p1) d1
1720 m2:(Store {t2} op2:(OffPtr [o2] p2) d2
1721 m3:(Store {t3} op3:(OffPtr [0] p3) d3
1722 m4:(Move [n] p4 _ mem))))
1723 && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
1724 && o2 == t3.Size()
1725 && o1-o2 == t2.Size()
1726 && n == t3.Size() + t2.Size() + t1.Size()
1727 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
1728 && clobber(m2, m3, m4)
1729 => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
1730(Store {t1} op1:(OffPtr [o1] p1) d1
1731 m2:(Store {t2} op2:(OffPtr [o2] p2) d2
1732 m3:(Store {t3} op3:(OffPtr [o3] p3) d3
1733 m4:(Store {t4} op4:(OffPtr [0] p4) d4
1734 m5:(Move [n] p5 _ mem)))))
1735 && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
1736 && o3 == t4.Size()
1737 && o2-o3 == t3.Size()
1738 && o1-o2 == t2.Size()
1739 && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
1740 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
1741 && clobber(m2, m3, m4, m5)
1742 => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
1743
1744// Don't Zero variables that are immediately completely overwritten
1745// before being accessed.
1746(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
1747 && zero.Uses == 1
1748 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
1749 && clobber(zero)
1750 => (Move {t} [n] dst1 src1 mem)
1751(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
1752 && zero.Uses == 1 && vardef.Uses == 1
1753 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
1754 && clobber(zero, vardef)
1755 => (Move {t} [n] dst1 src1 (VarDef {x} mem))
1756(Store {t1} op1:(OffPtr [o1] p1) d1
1757 m2:(Store {t2} op2:(OffPtr [0] p2) d2
1758 m3:(Zero [n] p3 mem)))
1759 && m2.Uses == 1 && m3.Uses == 1
1760 && o1 == t2.Size()
1761 && n == t2.Size() + t1.Size()
1762 && isSamePtr(p1, p2) && isSamePtr(p2, p3)
1763 && clobber(m2, m3)
1764 => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
1765(Store {t1} op1:(OffPtr [o1] p1) d1
1766 m2:(Store {t2} op2:(OffPtr [o2] p2) d2
1767 m3:(Store {t3} op3:(OffPtr [0] p3) d3
1768 m4:(Zero [n] p4 mem))))
1769 && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
1770 && o2 == t3.Size()
1771 && o1-o2 == t2.Size()
1772 && n == t3.Size() + t2.Size() + t1.Size()
1773 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
1774 && clobber(m2, m3, m4)
1775 => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
1776(Store {t1} op1:(OffPtr [o1] p1) d1
1777 m2:(Store {t2} op2:(OffPtr [o2] p2) d2
1778 m3:(Store {t3} op3:(OffPtr [o3] p3) d3
1779 m4:(Store {t4} op4:(OffPtr [0] p4) d4
1780 m5:(Zero [n] p5 mem)))))
1781 && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
1782 && o3 == t4.Size()
1783 && o2-o3 == t3.Size()
1784 && o1-o2 == t2.Size()
1785 && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
1786 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
1787 && clobber(m2, m3, m4, m5)
1788 => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
1789
1790// Don't Move from memory if the values are likely to already be
1791// in registers.
1792(Move {t1} [n] dst p1
1793 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
1794 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
1795 && isSamePtr(p1, p2) && isSamePtr(p2, p3)
1796 && t2.Alignment() <= t1.Alignment()
1797 && t3.Alignment() <= t1.Alignment()
1798 && registerizable(b, t2)
1799 && registerizable(b, t3)
1800 && o2 == t3.Size()
1801 && n == t2.Size() + t3.Size()
1802 => (Store {t2} (OffPtr <tt2> [o2] dst) d1
1803 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
1804(Move {t1} [n] dst p1
1805 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
1806 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
1807 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
1808 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
1809 && t2.Alignment() <= t1.Alignment()
1810 && t3.Alignment() <= t1.Alignment()
1811 && t4.Alignment() <= t1.Alignment()
1812 && registerizable(b, t2)
1813 && registerizable(b, t3)
1814 && registerizable(b, t4)
1815 && o3 == t4.Size()
1816 && o2-o3 == t3.Size()
1817 && n == t2.Size() + t3.Size() + t4.Size()
1818 => (Store {t2} (OffPtr <tt2> [o2] dst) d1
1819 (Store {t3} (OffPtr <tt3> [o3] dst) d2
1820 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
1821(Move {t1} [n] dst p1
1822 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
1823 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
1824 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
1825 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
1826 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
1827 && t2.Alignment() <= t1.Alignment()
1828 && t3.Alignment() <= t1.Alignment()
1829 && t4.Alignment() <= t1.Alignment()
1830 && t5.Alignment() <= t1.Alignment()
1831 && registerizable(b, t2)
1832 && registerizable(b, t3)
1833 && registerizable(b, t4)
1834 && registerizable(b, t5)
1835 && o4 == t5.Size()
1836 && o3-o4 == t4.Size()
1837 && o2-o3 == t3.Size()
1838 && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
1839 => (Store {t2} (OffPtr <tt2> [o2] dst) d1
1840 (Store {t3} (OffPtr <tt3> [o3] dst) d2
1841 (Store {t4} (OffPtr <tt4> [o4] dst) d3
1842 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
1843
1844// Same thing but with VarDef in the middle.
1845(Move {t1} [n] dst p1
1846 mem:(VarDef
1847 (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
1848 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
1849 && isSamePtr(p1, p2) && isSamePtr(p2, p3)
1850 && t2.Alignment() <= t1.Alignment()
1851 && t3.Alignment() <= t1.Alignment()
1852 && registerizable(b, t2)
1853 && registerizable(b, t3)
1854 && o2 == t3.Size()
1855 && n == t2.Size() + t3.Size()
1856 => (Store {t2} (OffPtr <tt2> [o2] dst) d1
1857 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
1858(Move {t1} [n] dst p1
1859 mem:(VarDef
1860 (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
1861 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
1862 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
1863 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
1864 && t2.Alignment() <= t1.Alignment()
1865 && t3.Alignment() <= t1.Alignment()
1866 && t4.Alignment() <= t1.Alignment()
1867 && registerizable(b, t2)
1868 && registerizable(b, t3)
1869 && registerizable(b, t4)
1870 && o3 == t4.Size()
1871 && o2-o3 == t3.Size()
1872 && n == t2.Size() + t3.Size() + t4.Size()
1873 => (Store {t2} (OffPtr <tt2> [o2] dst) d1
1874 (Store {t3} (OffPtr <tt3> [o3] dst) d2
1875 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
1876(Move {t1} [n] dst p1
1877 mem:(VarDef
1878 (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
1879 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
1880 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
1881 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
1882 && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
1883 && t2.Alignment() <= t1.Alignment()
1884 && t3.Alignment() <= t1.Alignment()
1885 && t4.Alignment() <= t1.Alignment()
1886 && t5.Alignment() <= t1.Alignment()
1887 && registerizable(b, t2)
1888 && registerizable(b, t3)
1889 && registerizable(b, t4)
1890 && registerizable(b, t5)
1891 && o4 == t5.Size()
1892 && o3-o4 == t4.Size()
1893 && o2-o3 == t3.Size()
1894 && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
1895 => (Store {t2} (OffPtr <tt2> [o2] dst) d1
1896 (Store {t3} (OffPtr <tt3> [o3] dst) d2
1897 (Store {t4} (OffPtr <tt4> [o4] dst) d3
1898 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
1899
// Prefer to Zero and Store rather than to Move.
(Move {t1} [n] dst p1
	mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
		(Zero {t3} [n] p3 _)))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& n >= o2 + t2.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Zero {t1} [n] dst mem))
(Move {t1} [n] dst p1
	mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
		(Store {t3} (OffPtr <tt3> [o3] p3) d2
			(Zero {t4} [n] p4 _))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& t4.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& registerizable(b, t3)
	&& n >= o2 + t2.Size()
	&& n >= o3 + t3.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Store {t3} (OffPtr <tt3> [o3] dst) d2
			(Zero {t1} [n] dst mem)))
(Move {t1} [n] dst p1
	mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
		(Store {t3} (OffPtr <tt3> [o3] p3) d2
			(Store {t4} (OffPtr <tt4> [o4] p4) d3
				(Zero {t5} [n] p5 _)))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& t4.Alignment() <= t1.Alignment()
	&& t5.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& registerizable(b, t3)
	&& registerizable(b, t4)
	&& n >= o2 + t2.Size()
	&& n >= o3 + t3.Size()
	&& n >= o4 + t4.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Store {t3} (OffPtr <tt3> [o3] dst) d2
			(Store {t4} (OffPtr <tt4> [o4] dst) d3
				(Zero {t1} [n] dst mem))))
(Move {t1} [n] dst p1
	mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
		(Store {t3} (OffPtr <tt3> [o3] p3) d2
			(Store {t4} (OffPtr <tt4> [o4] p4) d3
				(Store {t5} (OffPtr <tt5> [o5] p5) d4
					(Zero {t6} [n] p6 _))))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& t4.Alignment() <= t1.Alignment()
	&& t5.Alignment() <= t1.Alignment()
	&& t6.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& registerizable(b, t3)
	&& registerizable(b, t4)
	&& registerizable(b, t5)
	&& n >= o2 + t2.Size()
	&& n >= o3 + t3.Size()
	&& n >= o4 + t4.Size()
	&& n >= o5 + t5.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Store {t3} (OffPtr <tt3> [o3] dst) d2
			(Store {t4} (OffPtr <tt4> [o4] dst) d3
				(Store {t5} (OffPtr <tt5> [o5] dst) d4
					(Zero {t1} [n] dst mem)))))
(Move {t1} [n] dst p1
	mem:(VarDef
		(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
			(Zero {t3} [n] p3 _))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& n >= o2 + t2.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Zero {t1} [n] dst mem))
(Move {t1} [n] dst p1
	mem:(VarDef
		(Store {t2} (OffPtr <tt2> [o2] p2) d1
			(Store {t3} (OffPtr <tt3> [o3] p3) d2
				(Zero {t4} [n] p4 _)))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& t4.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& registerizable(b, t3)
	&& n >= o2 + t2.Size()
	&& n >= o3 + t3.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Store {t3} (OffPtr <tt3> [o3] dst) d2
			(Zero {t1} [n] dst mem)))
(Move {t1} [n] dst p1
	mem:(VarDef
		(Store {t2} (OffPtr <tt2> [o2] p2) d1
			(Store {t3} (OffPtr <tt3> [o3] p3) d2
				(Store {t4} (OffPtr <tt4> [o4] p4) d3
					(Zero {t5} [n] p5 _))))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& t4.Alignment() <= t1.Alignment()
	&& t5.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& registerizable(b, t3)
	&& registerizable(b, t4)
	&& n >= o2 + t2.Size()
	&& n >= o3 + t3.Size()
	&& n >= o4 + t4.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Store {t3} (OffPtr <tt3> [o3] dst) d2
			(Store {t4} (OffPtr <tt4> [o4] dst) d3
				(Zero {t1} [n] dst mem))))
(Move {t1} [n] dst p1
	mem:(VarDef
		(Store {t2} (OffPtr <tt2> [o2] p2) d1
			(Store {t3} (OffPtr <tt3> [o3] p3) d2
				(Store {t4} (OffPtr <tt4> [o4] p4) d3
					(Store {t5} (OffPtr <tt5> [o5] p5) d4
						(Zero {t6} [n] p6 _)))))))
	&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
	&& t2.Alignment() <= t1.Alignment()
	&& t3.Alignment() <= t1.Alignment()
	&& t4.Alignment() <= t1.Alignment()
	&& t5.Alignment() <= t1.Alignment()
	&& t6.Alignment() <= t1.Alignment()
	&& registerizable(b, t2)
	&& registerizable(b, t3)
	&& registerizable(b, t4)
	&& registerizable(b, t5)
	&& n >= o2 + t2.Size()
	&& n >= o3 + t3.Size()
	&& n >= o4 + t4.Size()
	&& n >= o5 + t5.Size()
	=> (Store {t2} (OffPtr <tt2> [o2] dst) d1
		(Store {t3} (OffPtr <tt3> [o3] dst) d2
			(Store {t4} (OffPtr <tt4> [o4] dst) d3
				(Store {t5} (OffPtr <tt5> [o5] dst) d4
					(Zero {t1} [n] dst mem)))))

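// Remove race-instrumentation calls (racefuncenter/racefuncexit) once
// needRaceCleanup determines the function no longer needs them: the
// call's memory result is replaced by its incoming memory argument.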
(SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x
(SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x

// When rewriting append to growslice, we use the result of growslice as
// the new length, so that we don't have to spill/restore the new length
// around the growslice call.
// The exception is that if the new length is a constant, avoiding the
// spill is pointless and its constantness is sometimes useful for
// subsequent optimizations.
// See issue 56440.
// Note there are 2 rules here, one for the pre-decomposed []T result and one for
// the post-decomposed (*T,int,int) result. (The latter is generated after call expansion.)
// TODO(thepudds): we probably need the new growsliceBuf and growsliceBufNoAlias here as well?
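// For example (a sketch, assuming the compiler can prove the slice's
// length before the append, e.g. after inlining):
//
//	s := make([]int, 0)
//	s = append(s, 1) // growslice is passed newLen = 1 as a constant
//
// Here len(s) after the append folds to the constant 1 rather than
// reloading growslice's length result.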
(SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const(64|32)) _ _ _ _)))
	&& (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
	=> newLen
(SelectN [1] (StaticCall {sym} _ newLen:(Const(64|32)) _ _ _ _)) && v.Type.IsInteger()
	&& (isSameCall(sym, "runtime.growslice") || isSameCall(sym, "runtime.growsliceNoAlias"))
	=> newLen

// Collapse moving A -> B -> C into just A -> C.
// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
// This happens most commonly when B is an autotmp inserted earlier
// during compilation to ensure correctness.
// Take care that overlapping moves are preserved.
// Restrict this optimization to the stack, to avoid duplicating loads from the heap;
// see CL 145208 for discussion.
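// For example (a sketch): if the compiler materialized an assignment
// of a large struct as
//
//	tmp := src // compiler-inserted autotmp on the stack
//	dst = tmp
//
// i.e. Move src -> tmp followed by Move tmp -> dst, the second Move is
// rewritten to copy straight from src, leaving the src -> tmp move for
// dead-store elimination.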
(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
	&& t1.Compare(t2) == types.CMPeq
	&& isSamePtr(tmp1, tmp2)
	&& isStackPtr(src) && !isVolatile(src)
	&& disjoint(src, s, tmp2, s)
	&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
	=> (Move {t1} [s] dst src midmem)

// Same, but for large types that require VarDefs.
(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
	&& t1.Compare(t2) == types.CMPeq
	&& isSamePtr(tmp1, tmp2)
	&& isStackPtr(src) && !isVolatile(src)
	&& disjoint(src, s, tmp2, s)
	&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
	=> (Move {t1} [s] dst src midmem)

// Don't zero the same bits twice.
(Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) && isSamePtr(dst1, dst2) => zero
(Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) && isSamePtr(dst1, dst2) => vardef

// Elide self-moves. This only happens rarely (e.g. test/fixedbugs/bug277.go).
// However, this rule is needed to prevent the previous rule from looping forever in such cases.
(Move dst src mem) && isSamePtr(dst, src) => mem

// Constant rotate detection.
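// For example, with c = 7 and d = 57 (= 64-7):
//
//	r := x<<7 | x>>57
//
// becomes a single RotateLeft64 of x by the constant 7.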
((Add64|Or64|Xor64) (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d]))) && c < 64 && d == 64-c && canRotate(config, 64) => (RotateLeft64 x z)
((Add32|Or32|Xor32) (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d]))) && c < 32 && d == 32-c && canRotate(config, 32) => (RotateLeft32 x z)
((Add16|Or16|Xor16) (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d]))) && c < 16 && d == 16-c && canRotate(config, 16) => (RotateLeft16 x z)
((Add8|Or8|Xor8) (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d]))) && c < 8 && d == 8-c && canRotate(config, 8) => (RotateLeft8 x z)

// Non-constant rotate detection.
// We use shiftIsBounded to make sure that neither of the shifts is >64.
// Note: these rules are subtle when the shift amounts are 0/64, as Go shifts
// are different from most native shifts. But it works out.
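// For example (assuming the prove pass has shown y < 64, so both
// shifts are bounded):
//
//	r := x<<y | x>>(64-y)
//
// becomes (RotateLeft64 x y). This is sound even at y == 0: the right
// shift is then by 64, which yields 0 in Go, so the OR equals x, just
// like a rotate by 0.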
((Add64|Or64|Xor64) left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
((Add64|Or64|Xor64) left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
((Add64|Or64|Xor64) left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
((Add64|Or64|Xor64) left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)

((Add64|Or64|Xor64) right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
((Add64|Or64|Xor64) right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
((Add64|Or64|Xor64) right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
((Add64|Or64|Xor64) right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)

((Add32|Or32|Xor32) left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
((Add32|Or32|Xor32) left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
((Add32|Or32|Xor32) left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
((Add32|Or32|Xor32) left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)

((Add32|Or32|Xor32) right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
((Add32|Or32|Xor32) right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
((Add32|Or32|Xor32) right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
((Add32|Or32|Xor32) right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)

((Add16|Or16|Xor16) left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
((Add16|Or16|Xor16) left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
((Add16|Or16|Xor16) left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
((Add16|Or16|Xor16) left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)

((Add16|Or16|Xor16) right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
((Add16|Or16|Xor16) right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
((Add16|Or16|Xor16) right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
((Add16|Or16|Xor16) right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)

((Add8|Or8|Xor8) left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
((Add8|Or8|Xor8) left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
((Add8|Or8|Xor8) left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
((Add8|Or8|Xor8) left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)

((Add8|Or8|Xor8) right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
((Add8|Or8|Xor8) right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
((Add8|Or8|Xor8) right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
((Add8|Or8|Xor8) right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)

// Rotating by y&c, with c a mask that doesn't change the bottom bits, is the same as rotating by y.
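// For example, rotating by y&63 is the same as rotating by y, since a
// 64-bit rotate already only consults the low 6 bits of its amount
// (math/bits.RotateLeft64 behaves the same way: it rotates by k mod 64).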
(RotateLeft64 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 63 => (RotateLeft64 x y)
(RotateLeft32 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 31 => (RotateLeft32 x y)
(RotateLeft16 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 15 => (RotateLeft16 x y)
(RotateLeft8 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 7 => (RotateLeft8 x y)

// Rotating by -(y&c), with c a mask that doesn't change the bottom bits, is the same as rotating by -y.
(RotateLeft64 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&63 == 63 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft32 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&31 == 31 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft16 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&15 == 15 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft8 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&7 == 7 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y))

// Rotating by y+c, with c a multiple of the value width, is the same as rotating by y.
(RotateLeft64 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 0 => (RotateLeft64 x y)
(RotateLeft32 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 0 => (RotateLeft32 x y)
(RotateLeft16 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 0 => (RotateLeft16 x y)
(RotateLeft8 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 0 => (RotateLeft8 x y)

// Rotating by c-y, with c a multiple of the value width, is the same as rotating by -y.
(RotateLeft64 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&63 == 0 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft32 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&31 == 0 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft16 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&15 == 0 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y))
(RotateLeft8 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&7 == 0 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y))

// Ensure we don't do Const64 rotates on a 32-bit system.
(RotateLeft64 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft64 x (Const32 <t> [int32(c)]))
(RotateLeft32 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft32 x (Const32 <t> [int32(c)]))
(RotateLeft16 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft16 x (Const32 <t> [int32(c)]))
(RotateLeft8 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft8 x (Const32 <t> [int32(c)]))

// Rotating by c, then by d, is the same as rotating by c+d.
// We're trading a rotate for an add, which seems generally a good choice. It is especially good when c and d are constants.
// This rule is a bit tricky as c and d might be different widths. We handle only cases where they are the same width.
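// For example, bits.RotateLeft64(bits.RotateLeft64(x, 3), 5) equals
// bits.RotateLeft64(x, 8): rotate amounts simply add (mod 64).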
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 8 && d.Type.Size() == 8 => (RotateLeft(64|32|16|8) x (Add64 <c.Type> c d))
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 4 && d.Type.Size() == 4 => (RotateLeft(64|32|16|8) x (Add32 <c.Type> c d))
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 2 && d.Type.Size() == 2 => (RotateLeft(64|32|16|8) x (Add16 <c.Type> c d))
(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 1 && d.Type.Size() == 1 => (RotateLeft(64|32|16|8) x (Add8 <c.Type> c d))

// Loading fixed addresses and constants.
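// A sketch of the intent (the details live in isFixedLoad and
// rewriteFixedLoad): a load at a constant offset from a fixed,
// read-only symbol, e.g. a field of an itab reached through interface
// construction, can be resolved to a constant at compile time.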
(Load (Addr {s} sb) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (Convert (Addr {s} sb) _) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (ITab (IMake (Addr {s} sb) _)) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (ITab (IMake (Convert (Addr {s} sb) _) _)) _) && isFixedLoad(v, s, 0) => rewriteFixedLoad(v, s, sb, 0)
(Load (OffPtr [off] (Addr {s} sb)) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
(Load (OffPtr [off] (Convert (Addr {s} sb) _)) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
(Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)
(Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && isFixedLoad(v, s, off) => rewriteFixedLoad(v, s, sb, off)

// Calling cmpstring a second time with the same arguments in the
// same memory state can reuse the result of the first call.
// See issue 61725.
// Note that this could pretty easily generalize to any pure function.
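// For example:
//
//	if a < b {          // calls runtime.cmpstring(a, b)
//		...
//	} else if a > b {   // same arguments, same memory state
//		...
//	}
//
// The second comparison reuses the first call's result instead of
// calling cmpstring again.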
(SelectN [0] (StaticLECall {f} x y (SelectN [1] c:(StaticLECall {g} x y mem))))
	&& isSameCall(f, "runtime.cmpstring")
	&& isSameCall(g, "runtime.cmpstring")
=> @c.Block (SelectN [0] <typ.Int> c)

// If we don't use the result of cmpstring, we might as well not call it.
// Note that this could pretty easily generalize to any pure function.
(SelectN [1] c:(StaticLECall {f} _ _ mem)) && c.Uses == 1 && isSameCall(f, "runtime.cmpstring") && clobber(c) => mem

// We can easily compute the result of efaceeq if
// we know the underlying type is pointer-ish.
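// For example (a sketch): if two any values are known to hold the same
// pointer-shaped, directly comparable type (say *T), comparing their
// data words is just pointer equality, so the efaceeq call reduces to
// EqPtr.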
(StaticLECall {f} typ_ x y mem)
	&& isSameCall(f, "runtime.efaceeq")
	&& isDirectAndComparableType(typ_)
	&& clobber(v)
	=> (MakeResult (EqPtr x y) mem)

// We can easily compute the result of ifaceeq if
// we know the underlying type is pointer-ish.
(StaticLECall {f} itab x y mem)
	&& isSameCall(f, "runtime.ifaceeq")
	&& isDirectAndComparableIface(itab)
	&& clobber(v)
	=> (MakeResult (EqPtr x y) mem)

// If we use the result of slicebytetostring in a map lookup operation,
// then we don't need to actually do the []byte->string conversion.
// We can just use the ptr/len of the byte slice directly as a (temporary) string.
//
// Note that this does not handle some obscure cases like
// m[[2]string{string(b1), string(b2)}]. There is code in ../walk/order.go
// which handles some of those cases.
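// For example:
//
//	v, ok := m[string(b)] // m is a map[string]V, b is a []byte
//
// needs no allocation or copy: the lookup uses b's pointer and length
// directly as a temporary string.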
(StaticLECall {f} [argsize] typ_ map_ key:(SelectN [0] sbts:(StaticLECall {g} _ ptr len mem)) m:(SelectN [1] sbts))
	&& (isSameCall(f, "runtime.mapaccess1_faststr")
		|| isSameCall(f, "runtime.mapaccess2_faststr")
		|| isSameCall(f, "runtime.mapdelete_faststr"))
	&& isSameCall(g, "runtime.slicebytetostring")
	&& key.Uses == 1
	&& sbts.Uses == 2
	&& resetCopy(m, mem)
	&& clobber(sbts)
	&& clobber(key)
=> (StaticLECall {f} [argsize] typ_ map_ (StringMake <typ.String> ptr len) mem)

// As with the map lookups above, handle unique.Make for strings:
// unique.Make clones the string itself, so the conversion's copy can be elided.
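// For example:
//
//	h := unique.Make(string(b)) // b is a []byte
//
// can skip the conversion's copy, passing b's ptr/len as a temporary
// string; unique.Make makes its own durable copy as needed.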
(StaticLECall {f} [argsize] dict_ key:(SelectN [0] sbts:(StaticLECall {g} _ ptr len mem)) m:(SelectN [1] sbts))
	&& isSameCall(f, "unique.Make[go.shape.string]")
	&& isSameCall(g, "runtime.slicebytetostring")
	&& key.Uses == 1
	&& sbts.Uses == 2
	&& resetCopy(m, mem)
	&& clobber(sbts)
	&& clobber(key)
=> (StaticLECall {f} [argsize] dict_ (StringMake <typ.String> ptr len) mem)

// Transform some CondSelect into math operations.
// if b { x++ } => x += b // but not on arm64 because it has CSINC
(CondSelect (Add8 <t> x (Const8 [1])) x bool) && config.arch != "arm64" => (Add8 x (CvtBoolToUint8 <t> bool))
(CondSelect (Add(64|32|16) <t> x (Const(64|32|16) [1])) x bool) && config.arch != "arm64" => (Add(64|32|16) x (ZeroExt8to(64|32|16) <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))

// if b { x-- } => x -= b
(CondSelect (Add8 <t> x (Const8 [-1])) x bool) => (Sub8 x (CvtBoolToUint8 <t> bool))
(CondSelect (Add(64|32|16) <t> x (Const(64|32|16) [-1])) x bool) => (Sub(64|32|16) x (ZeroExt8to(64|32|16) <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))

// if b { x <<= 1 } => x <<= b
(CondSelect (Lsh(64|32|16|8)x64 x (Const64 [1])) x bool) => (Lsh(64|32|16|8)x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))

// if b { x >>= 1 } => x >>= b
(CondSelect (Rsh(64|32|16|8)x64 x (Const64 [1])) x bool) => (Rsh(64|32|16|8)x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
(CondSelect (Rsh(64|32|16|8)Ux64 x (Const64 [1])) x bool) => (Rsh(64|32|16|8)Ux8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))

// bool(int(x)) => x: fold comparisons of an int-converted bool against 0/1 back to the bool (or its negation).
(Neq8 (CvtBoolToUint8 x) (Const8 [0])) => x
(Neq8 (CvtBoolToUint8 x) (Const8 [1])) => (Not x)
(Eq8 (CvtBoolToUint8 x) (Const8 [1])) => x
(Eq8 (CvtBoolToUint8 x) (Const8 [0])) => (Not x)
(Neq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => x
(Neq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => (Not x)
(Eq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => x
(Eq(64|32|16) (ZeroExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => (Not x)
(Neq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => x
(Neq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => (Not x)
(Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => x
(Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => (Not x)