//===-- X86InstrSSE.td - SSE Instruction Set ---------------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions
// and the properties of the instructions that are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

class OpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  // InstrSchedModel info.
  X86FoldableSchedWrite Sched = WriteFAdd;
}

class SizeItins<OpndItins arg_s, OpndItins arg_d> {
  OpndItins s = arg_s;
  OpndItins d = arg_d;
}

class ShiftOpndItins<InstrItinClass arg_rr, InstrItinClass arg_rm,
                     InstrItinClass arg_ri> {
  InstrItinClass rr = arg_rr;
  InstrItinClass rm = arg_rm;
  InstrItinClass ri = arg_ri;
}

// scalar
let Sched = WriteFAdd in {
def SSE_ALU_F32S : OpndItins<
  IIC_SSE_ALU_F32S_RR, IIC_SSE_ALU_F32S_RM
>;

def SSE_ALU_F64S : OpndItins<
  IIC_SSE_ALU_F64S_RR, IIC_SSE_ALU_F64S_RM
>;
}

def SSE_ALU_ITINS_S : SizeItins<
  SSE_ALU_F32S, SSE_ALU_F64S
>;

let Sched = WriteFMul in {
def SSE_MUL_F32S : OpndItins<
  IIC_SSE_MUL_F32S_RR, IIC_SSE_MUL_F32S_RM
>;

def SSE_MUL_F64S : OpndItins<
  IIC_SSE_MUL_F64S_RR, IIC_SSE_MUL_F64S_RM
>;
}

def SSE_MUL_ITINS_S : SizeItins<
  SSE_MUL_F32S, SSE_MUL_F64S
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32S : OpndItins<
  IIC_SSE_DIV_F32S_RR, IIC_SSE_DIV_F32S_RM
>;

def SSE_DIV_F64S : OpndItins<
  IIC_SSE_DIV_F64S_RR, IIC_SSE_DIV_F64S_RM
>;
}

def SSE_DIV_ITINS_S : SizeItins<
  SSE_DIV_F32S, SSE_DIV_F64S
>;

// parallel
let Sched = WriteFAdd in {
def SSE_ALU_F32P : OpndItins<
  IIC_SSE_ALU_F32P_RR, IIC_SSE_ALU_F32P_RM
>;

def SSE_ALU_F64P : OpndItins<
  IIC_SSE_ALU_F64P_RR, IIC_SSE_ALU_F64P_RM
>;
}

def SSE_ALU_ITINS_P : SizeItins<
  SSE_ALU_F32P, SSE_ALU_F64P
>;

let Sched = WriteFMul in {
def SSE_MUL_F32P : OpndItins<
  IIC_SSE_MUL_F32P_RR, IIC_SSE_MUL_F32P_RM
>;

def SSE_MUL_F64P : OpndItins<
  IIC_SSE_MUL_F64P_RR, IIC_SSE_MUL_F64P_RM
>;
}

def SSE_MUL_ITINS_P : SizeItins<
  SSE_MUL_F32P, SSE_MUL_F64P
>;

let Sched = WriteFDiv in {
def SSE_DIV_F32P : OpndItins<
  IIC_SSE_DIV_F32P_RR, IIC_SSE_DIV_F32P_RM
>;

def SSE_DIV_F64P : OpndItins<
  IIC_SSE_DIV_F64P_RR, IIC_SSE_DIV_F64P_RM
>;
}

def SSE_DIV_ITINS_P : SizeItins<
  SSE_DIV_F32P, SSE_DIV_F64P
>;

let Sched = WriteVecLogic in
def SSE_VEC_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

def SSE_BIT_ITINS_P : OpndItins<
  IIC_SSE_BIT_P_RR, IIC_SSE_BIT_P_RM
>;

let Sched = WriteVecALU in {
def SSE_INTALU_ITINS_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

def SSE_INTALUQ_ITINS_P : OpndItins<
  IIC_SSE_INTALUQ_P_RR, IIC_SSE_INTALUQ_P_RM
>;
}

let Sched = WriteVecIMul in
def SSE_INTMUL_ITINS_P : OpndItins<
  IIC_SSE_INTMUL_P_RR, IIC_SSE_INTMUL_P_RM
>;

def SSE_INTSHIFT_ITINS_P : ShiftOpndItins<
  IIC_SSE_INTSH_P_RR, IIC_SSE_INTSH_P_RM, IIC_SSE_INTSH_P_RI
>;

def SSE_MOVA_ITINS : OpndItins<
  IIC_SSE_MOVA_P_RR, IIC_SSE_MOVA_P_RM
>;

def SSE_MOVU_ITINS : OpndItins<
  IIC_SSE_MOVU_P_RR, IIC_SSE_MOVU_P_RM
>;

def SSE_DPPD_ITINS : OpndItins<
  IIC_SSE_DPPD_RR, IIC_SSE_DPPD_RM
>;

def SSE_DPPS_ITINS : OpndItins<
  IIC_SSE_DPPS_RR, IIC_SSE_DPPS_RM
>;

def DEFAULT_ITINS : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

def SSE_EXTRACT_ITINS : OpndItins<
  IIC_SSE_EXTRACTPS_RR, IIC_SSE_EXTRACTPS_RM
>;

def SSE_INSERT_ITINS : OpndItins<
  IIC_SSE_INSERTPS_RR, IIC_SSE_INSERTPS_RM
>;

let Sched = WriteMPSAD in
def SSE_MPSADBW_ITINS : OpndItins<
  IIC_SSE_MPSADBW_RR, IIC_SSE_MPSADBW_RM
>;

let Sched = WriteVecIMul in
def SSE_PMULLD_ITINS : OpndItins<
  IIC_SSE_PMULLD_RR, IIC_SSE_PMULLD_RM
>;

// Definitions for backward compatibility.
// The instructions mapped onto these definitions use a different itinerary
// than the actual scheduling model.
let Sched = WriteShuffle in
def DEFAULT_ITINS_SHUFFLESCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVecIMul in
def DEFAULT_ITINS_VECIMULSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteShuffle in
def SSE_INTALU_ITINS_SHUFF_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteMPSAD in
def DEFAULT_ITINS_MPSADSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def DEFAULT_ITINS_FBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteBlend in
def DEFAULT_ITINS_BLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteVarBlend in
def DEFAULT_ITINS_VARBLENDSCHED : OpndItins<
  IIC_ALU_NONMEM, IIC_ALU_MEM
>;

let Sched = WriteFBlend in
def SSE_INTALU_ITINS_FBLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

let Sched = WriteBlend in
def SSE_INTALU_ITINS_BLEND_P : OpndItins<
  IIC_SSE_INTALU_P_RR, IIC_SSE_INTALU_P_RM
>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           OpndItins itins,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))], itins.rr>,
                Sched<[itins.Sched]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))], itins.rm>,
              Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

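// Illustrative sketch (not a definition used by the compiler): with
// Is2Addr = 1 the !if above selects the two-address SSE syntax, e.g.
//   addss %xmm1, %xmm0            # xmm0[31:0] += xmm1[31:0]
// while an AVX instantiation (Is2Addr = 0) selects the three-address form
//   vaddss %xmm2, %xmm1, %xmm0    # xmm0[31:0] = xmm1[31:0] + xmm2[31:0]
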
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               OpndItins itins,
                               bit Is2Addr = 1> {
  let isCodeGenOnly = 1 in {
    def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                    !if(Is2Addr,
                        !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                        !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                    [(set RC:$dst, (!cast<Intrinsic>(
                        !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
                        RC:$src1, RC:$src2))], itins.rr>,
                    Sched<[itins.Sched]>;
    def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
                    !if(Is2Addr,
                        !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                        !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                    [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                        SSEVer, "_", OpcodeStr, FPSizeStr))
                        RC:$src1, mem_cpat:$src2))], itins.rm>,
                    Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, OpndItins itins, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], itins.rr, d>,
                Sched<[itins.Sched]>;
  let mayLoad = 1 in
    def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))],
                itins.rm, d>,
                Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1, hasSideEffects = 0 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                pat_rr, NoItinerary, d>,
                Sched<[WriteVecLogic]>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              pat_rm, NoItinerary, d>,
              Sched<[WriteVecLogicLd, ReadAfterLd]>;
}

//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy.
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32)>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64)>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
let AddedComplexity = 25 in { // to give priority over vinsertf128rm
  def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)),
            (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
  def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)),
            (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
  def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)),
            (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
  def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)),
            (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
  def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)),
            (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
  def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)),
            (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (COPY_TO_REGCLASS FR32:$src, VR128)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (COPY_TO_REGCLASS FR64:$src, VR128)>;

// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}

// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
}

// Alias instructions that map fld0 to xorps for SSE or vxorps for AVX.
// This is expanded by ExpandPostRAPseudos.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasSSE1]>;
  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>, Requires<[HasSSE2]>;
}

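// A minimal sketch of the expansion mentioned above: after register
// allocation, ExpandPostRAPseudos rewrites FsFLD0SS/FsFLD0SD into a
// self-XOR of the destination, e.g.
//   xorps %xmm0, %xmm0            # xmm0 = +0.0 (vxorps under AVX)
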
//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instruction that maps a zero vector to pxor / xorp* for SSE.
// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
// swizzled by ExecutionDepsFix to pxor.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
}

def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0)>;

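// A sketch of the ExecutionDepsFix swizzle mentioned above: when the zeroed
// register feeds only integer-domain users, the xorps emitted for V_SET0 may
// be rewritten as
//   pxor %xmm0, %xmm0
// to keep the value in the integer domain and avoid a domain-crossing penalty.
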
// The same as above, but for AVX. The 256-bit AVX1 ISA doesn't support PI,
// and doesn't need it, because on Sandy Bridge the register is set to zero
// at the rename stage without using any execution unit, so AVX_SET0 can be
// used for vector int instructions without penalty.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
  def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                   [(set VR256:$dst, (v8f32 immAllZerosV))]>;
}

let Predicates = [HasAVX] in
  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;

let Predicates = [HasAVX2] in {
  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
}

// AVX1 has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros
// this way.
let Predicates = [HasAVX1Only] in {
  def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;
  def : Pat<(bc_v32i8 (v8f32 immAllZerosV)),
            (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>;

  def : Pat<(v16i16 immAllZerosV), (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;
  def : Pat<(bc_v16i16 (v8f32 immAllZerosV)),
            (SUBREG_TO_REG (i16 0), (V_SET0), sub_xmm)>;

  def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;
  def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
            (SUBREG_TO_REG (i32 0), (V_SET0), sub_xmm)>;

  def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
  def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
            (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>;
}

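// The SUBREG_TO_REG patterns above are safe because every VEX-encoded 128-bit
// instruction zeroes bits [255:128] of the corresponding YMM register, e.g.
//   vxorps %xmm0, %xmm0, %xmm0    # also clears ymm0[255:128]
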
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, SchedRW = [WriteZero] in {
  def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllOnesV))]>;
  let Predicates = [HasAVX2] in
  def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "",
                          [(set VR256:$dst, (v8i32 immAllOnesV))]>;
}

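// Like V_SET0, these are expanded after register allocation, in this case to
// a compare-equal of a register with itself, e.g.
//   pcmpeqd %xmm0, %xmm0          # xmm0 = all ones
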
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; register-to-register
// movss/movsd is not modeled as an INSERT_SUBREG because INSERT_SUBREG requires
// that the insert be implementable in terms of a copy, and, as just mentioned,
// we don't use movss/movsd for copies.
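// For example, a register-to-register "movss %xmm1, %xmm0" writes only
// xmm0[31:0] and preserves xmm0[127:32], so it still depends on the previous
// value of %xmm0, whereas a plain register copy carries no such dependency.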
//===----------------------------------------------------------------------===//

multiclass sse12_move_rr<RegisterClass RC, SDNode OpNode, ValueType vt,
                         X86MemOperand x86memop, string base_opc,
                         string asm_opr> {
  def rr : SI<0x10, MRMSrcReg, (outs VR128:$dst),
              (ins VR128:$src1, RC:$src2),
              !strconcat(base_opc, asm_opr),
              [(set VR128:$dst, (vt (OpNode VR128:$src1,
                                            (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;

  // For the disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : SI<0x11, MRMDestReg, (outs VR128:$dst),
                  (ins VR128:$src1, RC:$src2),
                  !strconcat(base_opc, asm_opr),
                  [], IIC_SSE_MOV_S_RR>, Sched<[WriteFShuffle]>;
}

multiclass sse12_move<RegisterClass RC, SDNode OpNode, ValueType vt,
                      X86MemOperand x86memop, string OpcodeStr> {
  // AVX
  defm V#NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
                              VEX_4V, VEX_LIG;

  def V#NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                     VEX, VEX_LIG, Sched<[WriteStore]>;
  // SSE1 & 2
  let Constraints = "$src1 = $dst" in {
    defm NAME : sse12_move_rr<RC, OpNode, vt, x86memop, OpcodeStr,
                              "\t{$src2, $dst|$dst, $src2}">;
  }

  def NAME#mr : SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
                   Sched<[WriteStore]>;
}

// Loading from memory automatically zeroes the upper bits.
multiclass sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                         PatFrag mem_pat, string OpcodeStr> {
  def V#NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                     [(set RC:$dst, (mem_pat addr:$src))],
                     IIC_SSE_MOV_S_RM>, VEX, VEX_LIG, Sched<[WriteLoad]>;
  def NAME#rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set RC:$dst, (mem_pat addr:$src))],
                   IIC_SSE_MOV_S_RM>, Sched<[WriteLoad]>;
}

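// For example, "movss (%rdi), %xmm0" loads xmm0[31:0] from memory and zeroes
// xmm0[127:32]; the VEX-encoded form additionally zeroes ymm0[255:128].
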
defm MOVSS : sse12_move<FR32, X86Movss, v4f32, f32mem, "movss">, XS;
defm MOVSD : sse12_move<FR64, X86Movsd, v2f64, f64mem, "movsd">, XD;

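// The two defms above expand (via sse12_move and sse12_move_rr) to
// MOVSS{rr,mr} and MOVSD{rr,mr} plus their VEX-prefixed VMOVSS*/VMOVSD*
// counterparts; the rm (load) forms are added by sse12_move_rm below.
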
let canFoldAsLoad = 1, isReMaterializable = 1 in {
  defm MOVSS : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    defm MOVSD : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

// Patterns
let Predicates = [UseAVX] in {
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSrm addr:$src), VR128)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDrm addr:$src), VR128)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                   (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>;
  }
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                   (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>;

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128:$src), FR32))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128:$src), FR64))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4i32 VR128:$src2), FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (COPY_TO_REGCLASS (v4f32 VR128:$src2), FR32))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8i32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8i32 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSrr (EXTRACT_SUBREG (v8f32 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v8f32 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4i64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4i64 VR256:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256:$src1, VR256:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDrr (EXTRACT_SUBREG (v4f64 VR256:$src1), sub_xmm),
                        (EXTRACT_SUBREG (v4f64 VR256:$src2), sub_xmm)),
              sub_xmm)>;

  // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

let Predicates = [UseSSE1] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm already zeros the high parts of the register.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (MOVSSrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR32))>;

  // Shuffle with MOVSS
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>;
}

let Predicates = [UseSSE2] in {
  let Predicates = [NoSSE41], AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm already zeros the high parts of the register.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst, (COPY_TO_REGCLASS VR128:$src, FR64))>;

  // Shuffle with MOVSD
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;

  // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
  // problem is during lowering, where it's not possible to recognize the fold
  // because it has two uses through a bitcast. One use disappears at isel time
  // and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            OpndItins itins,
                            bit IsReMaterializable = 1> {
  let hasSideEffects = 0 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], itins.rr, d>,
              Sched<[WriteFShuffle]>;
  let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], itins.rm, d>,
              Sched<[WriteLoad]>;
}

let Predicates = [HasAVX, NoVLX] in {
  defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                  "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                  PS, VEX;
  defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                  "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                  PD, VEX;
  defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                  "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                  PS, VEX;
  defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                  "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                  PD, VEX;

  defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                   "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                   PS, VEX, VEX_L;
  defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                   "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                   PD, VEX, VEX_L;
  defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                   "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                   PS, VEX, VEX_L;
  defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                   "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                   PD, VEX, VEX_L;
}

let Predicates = [UseSSE1] in {
  defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 PS;
  defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                 "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                 PS;
}
let Predicates = [UseSSE2] in {
  defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                 "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
                                 PD;
  defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 PD;
}

let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
  def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                       IIC_SSE_MOVA_P_MR>, VEX;
  def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                       IIC_SSE_MOVA_P_MR>, VEX;
  def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(store (v4f32 VR128:$src), addr:$dst)],
                       IIC_SSE_MOVU_P_MR>, VEX;
  def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(store (v2f64 VR128:$src), addr:$dst)],
                       IIC_SSE_MOVU_P_MR>, VEX;
  def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                        "movaps\t{$src, $dst|$dst, $src}",
                        [(alignedstore256 (v8f32 VR256:$src), addr:$dst)],
                        IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
  def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                        "movapd\t{$src, $dst|$dst, $src}",
                        [(alignedstore256 (v4f64 VR256:$src), addr:$dst)],
                        IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
  def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                        "movups\t{$src, $dst|$dst, $src}",
                        [(store (v8f32 VR256:$src), addr:$dst)],
                        IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
  def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                        "movupd\t{$src, $dst|$dst, $src}",
                        [(store (v4f64 VR256:$src), addr:$dst)],
                        IIC_SSE_MOVU_P_MR>, VEX, VEX_L;
} // SchedRW

// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def VMOVAPSrr_REV : VPSI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movaps\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVAPDrr_REV : VPDI<0x29, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movapd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVA_P_RR>, VEX;
  def VMOVUPSrr_REV : VPSI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movups\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVUPDrr_REV : VPDI<0x11, MRMDestReg, (outs VR128:$dst),
                           (ins VR128:$src),
                           "movupd\t{$src, $dst|$dst, $src}", [],
                           IIC_SSE_MOVU_P_RR>, VEX;
  def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movaps\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movapd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
  def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movups\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
  def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst),
                            (ins VR256:$src),
                            "movupd\t{$src, $dst|$dst, $src}", [],
                            IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

let Predicates = [HasAVX] in {
  def : Pat<(v8i32 (X86vzmovl
                    (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl
                    (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl
                    (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl
                    (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
}

def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

let SchedRW = [WriteStore] in {
  def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>;
  def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVA_P_MR>;
  def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>;
  def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)],
                     IIC_SSE_MOVU_P_MR>;
} // SchedRW

// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteFShuffle] in {
  def MOVAPSrr_REV : PSI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movaps\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVAPDrr_REV : PDI<0x29, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movapd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVA_P_RR>;
  def MOVUPSrr_REV : PSI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
  def MOVUPDrr_REV : PDI<0x11, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}", [],
                         IIC_SSE_MOVU_P_RR>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [UseSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;

// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX, NoVLX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore256 (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;

  // Special patterns for storing subvector extracts of the lower 128 bits.
  // It's cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr.
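  // For example, storing the low half of a YMM register as
  //   vmovaps %xmm0, (%rdi)
  // is preferred over the equivalent
  //   vextractf128 $0, %ymm0, (%rdi)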
  def : Pat<(alignedstore (v2f64 (extract_subvector
                                  (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(alignedstore (v4f32 (extract_subvector
                                  (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(alignedstore (v2i64 (extract_subvector
                                  (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(alignedstore (v4i32 (extract_subvector
                                  (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(alignedstore (v8i16 (extract_subvector
                                  (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(alignedstore (v16i8 (extract_subvector
                                  (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;

  def : Pat<(store (v2f64 (extract_subvector
                           (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(store (v4f32 (extract_subvector
                           (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(store (v2i64 (extract_subvector
                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(store (v4i32 (extract_subvector
                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(store (v8i16 (extract_subvector
                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
  def : Pat<(store (v16i8 (extract_subvector
                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src, sub_xmm)))>;
}

// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
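// (movaps/movups need no prefix byte: movaps is 0F 28 /r while movdqa is
// 66 0F 6F /r, and movups is 0F 10 /r while movdqu is F3 0F 6F /r.)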
let Predicates = [UseSSE1] in {
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}

// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
let isCodeGenOnly = 1 in {
  def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                         "movaps\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                         "movapd\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                         IIC_SSE_MOVA_P_RM>, VEX;
  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))],
                       IIC_SSE_MOVA_P_RM>;
}
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed_base<bits<8> opc, SDNode psnode, SDNode pdnode,
                                      string base_opc, string asm_opr,
                                      InstrItinClass itin> {
  def PSrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "s", asm_opr),
                [(set VR128:$dst,
                  (psnode VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
                itin, SSEPackedSingle>, PS,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;

  def PDrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "d", asm_opr),
                [(set VR128:$dst, (v2f64 (pdnode VR128:$src1,
                  (scalar_to_vector (loadf64 addr:$src2)))))],
                itin, SSEPackedDouble>, PD,
                Sched<[WriteFShuffleLd, ReadAfterLd]>;

}

multiclass sse12_mov_hilo_packed<bits<8> opc, SDNode psnode, SDNode pdnode,
                                 string base_opc, InstrItinClass itin> {
  defm V#NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                              itin>, VEX_4V;

  let Constraints = "$src1 = $dst" in
    defm NAME : sse12_mov_hilo_packed_base<opc, psnode, pdnode, base_opc,
                                "\t{$src2, $dst|$dst, $src2}",
                                itin>;
}

let AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, X86Movlps, X86Movlpd, "movlp",
                                    IIC_SSE_MOV_LH>;
}

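// The defm above expands to MOVLPSrm/MOVLPDrm and, through the V#NAME
// instantiation, to VMOVLPSrm/VMOVLPDrm, which the patterns below select.
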
let SchedRW = [WriteStore] in {
  def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                       "movlps\t{$src, $dst|$dst, $src}",
                       [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                     (iPTR 0))), addr:$dst)],
                       IIC_SSE_MOV_LH>, VEX;
  def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                       "movlpd\t{$src, $dst|$dst, $src}",
                       [(store (f64 (vector_extract (v2f64 VR128:$src),
                                     (iPTR 0))), addr:$dst)],
                       IIC_SSE_MOV_LH>, VEX;
  def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>;
  def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)],
                     IIC_SSE_MOV_LH>;
} // SchedRW

let Predicates = [HasAVX] in {
  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                           (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE1] in {
  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)),
                         (iPTR 0))), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                           (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}

let Predicates = [UseSSE2] in {
  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, X86Movlhps, X86Movlhpd, "movhp",
                                    IIC_SSE_MOV_LH>;
}

let SchedRW = [WriteStore] in {
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
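// Roughly, a non-stored extract of element 1 becomes an unpckhpd of the
// register with itself followed by an extract of element 0, so only the
// store forms below need dedicated instructions.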
  def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                       "movhps\t{$src, $dst|$dst, $src}",
                       [(store (f64 (vector_extract
                                     (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                                (bc_v2f64 (v4f32 VR128:$src))),
                                     (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
  def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                       "movhpd\t{$src, $dst|$dst, $src}",
                       [(store (f64 (vector_extract
                                     (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                     (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>, VEX;
  def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (X86Unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                              (bc_v2f64 (v4f32 VR128:$src))),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
  def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (v2f64 (X86Unpckh VR128:$src, VR128:$src)),
                                   (iPTR 0))), addr:$dst)], IIC_SSE_MOV_LH>;
} // SchedRW

let Predicates = [HasAVX] in {
  // VMOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // VMOVHPD patterns

  // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;
  // Also handle an i64 load because that may get selected as a faster way to
  // load the data.
  def : Pat<(v2f64 (X86Unpckl VR128:$src1,
                    (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  def : Pat<(store (f64 (vector_extract
                         (v2f64 (X86VPermilpi VR128:$src, (i8 1))),
                         (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [UseSSE1] in {
  // MOVHPS patterns
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
}

let Predicates = [UseSSE2] in {
  // MOVHPD patterns

1369 // FIXME: Instead of X86Unpckl, there should be an X86Movlhpd here. The
1370 // problem is during lowering, where it's not possible to recognize the load
1371 // fold because it has two uses through a bitcast. One use disappears at isel
1372 // time and the fold opportunity reappears. (See the sketch above.)
1373 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1374 (scalar_to_vector (loadf64 addr:$src2)))),
1375 (MOVHPDrm VR128:$src1, addr:$src2)>;
1376 // Also handle an i64 load because that may get selected as a faster way to
1377 // load the data.
1378 def : Pat<(v2f64 (X86Unpckl VR128:$src1,
1379 (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src2)))))),
1380 (MOVHPDrm VR128:$src1, addr:$src2)>;
1382 def : Pat<(store (f64 (vector_extract
1383 (v2f64 (X86Shufp VR128:$src, VR128:$src, (i8 1))),
1384 (iPTR 0))), addr:$dst),
1385 (MOVHPDmr addr:$dst, VR128:$src)>;
1386 }
1388 //===----------------------------------------------------------------------===//
1389 // SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
1390 //===----------------------------------------------------------------------===//
1392 let AddedComplexity = 20, Predicates = [UseAVX] in {
1393 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
1394 (ins VR128:$src1, VR128:$src2),
1395 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1396 [(set VR128:$dst,
1397 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1398 IIC_SSE_MOV_LH>,
1399 VEX_4V, Sched<[WriteFShuffle]>;
1400 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
1401 (ins VR128:$src1, VR128:$src2),
1402 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1403 [(set VR128:$dst,
1404 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1405 IIC_SSE_MOV_LH>,
1406 VEX_4V, Sched<[WriteFShuffle]>;
1407 }
1408 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
1409 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
1410 (ins VR128:$src1, VR128:$src2),
1411 "movlhps\t{$src2, $dst|$dst, $src2}",
1412 [(set VR128:$dst,
1413 (v4f32 (X86Movlhps VR128:$src1, VR128:$src2)))],
1414 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1415 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
1416 (ins VR128:$src1, VR128:$src2),
1417 "movhlps\t{$src2, $dst|$dst, $src2}",
1418 [(set VR128:$dst,
1419 (v4f32 (X86Movhlps VR128:$src1, VR128:$src2)))],
1420 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
1421 }
1423 let Predicates = [UseAVX] in {
1424 // MOVLHPS patterns
1425 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1426 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
1427 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1428 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1430 // MOVHLPS patterns
1431 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1432 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
1433 }
1435 let Predicates = [UseSSE1] in {
1436 // MOVLHPS patterns
1437 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
1438 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
1439 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
1440 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
1442 // MOVHLPS patterns
1443 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
1444 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
1445 }
1447 //===----------------------------------------------------------------------===//
1448 // SSE 1 & 2 - Conversion Instructions
1449 //===----------------------------------------------------------------------===//
1451 def SSE_CVT_PD : OpndItins<
1452 IIC_SSE_CVT_PD_RR, IIC_SSE_CVT_PD_RM
1453 >;
1455 let Sched = WriteCvtI2F in
1456 def SSE_CVT_PS : OpndItins<
1457 IIC_SSE_CVT_PS_RR, IIC_SSE_CVT_PS_RM
1458 >;
1460 let Sched = WriteCvtI2F in
1461 def SSE_CVT_Scalar : OpndItins<
1462 IIC_SSE_CVT_Scalar_RR, IIC_SSE_CVT_Scalar_RM
1463 >;
1465 let Sched = WriteCvtF2I in
1466 def SSE_CVT_SS2SI_32 : OpndItins<
1467 IIC_SSE_CVT_SS2SI32_RR, IIC_SSE_CVT_SS2SI32_RM
1468 >;
1470 let Sched = WriteCvtF2I in
1471 def SSE_CVT_SS2SI_64 : OpndItins<
1472 IIC_SSE_CVT_SS2SI64_RR, IIC_SSE_CVT_SS2SI64_RM
1473 >;
1475 let Sched = WriteCvtF2I in
1476 def SSE_CVT_SD2SI : OpndItins<
1477 IIC_SSE_CVT_SD2SI_RR, IIC_SSE_CVT_SD2SI_RM
1478 >;
1480 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1481 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
1482 string asm, OpndItins itins> {
1483 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1484 [(set DstRC:$dst, (OpNode SrcRC:$src))],
1485 itins.rr>, Sched<[itins.Sched]>;
1486 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1487 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))],
1488 itins.rm>, Sched<[itins.Sched.Folded]>;
1489 }
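// For reference: a "defm CVTTSS2SI : sse12_cvt_s<...>" below expands into the
// pair CVTTSS2SIrr (register source) and CVTTSS2SIrm (folded load); those
// suffixed names are what the InstAliases later in this file refer to.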
1491 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1492 X86MemOperand x86memop, string asm, Domain d,
1493 OpndItins itins> {
1494 let hasSideEffects = 0 in {
1495 def rr : I<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
1496 [], itins.rr, d>, Sched<[itins.Sched]>;
1497 let mayLoad = 1 in
1498 def rm : I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
1499 [], itins.rm, d>, Sched<[itins.Sched.Folded]>;
1500 }
1501 }
1503 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1504 X86MemOperand x86memop, string asm> {
1505 let hasSideEffects = 0, Predicates = [UseAVX] in {
1506 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
1507 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1508 Sched<[WriteCvtI2F]>;
1509 let mayLoad = 1 in
1510 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1511 (ins DstRC:$src1, x86memop:$src),
1512 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
1513 Sched<[WriteCvtI2FLd, ReadAfterLd]>;
1514 } // hasSideEffects = 0
1515 }
1517 let Predicates = [UseAVX] in {
1518 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1519 "cvttss2si\t{$src, $dst|$dst, $src}",
1520 SSE_CVT_SS2SI_32>,
1521 XS, VEX, VEX_LIG;
1522 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1523 "cvttss2si\t{$src, $dst|$dst, $src}",
1524 SSE_CVT_SS2SI_64>,
1525 XS, VEX, VEX_W, VEX_LIG;
1526 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1527 "cvttsd2si\t{$src, $dst|$dst, $src}",
1528 SSE_CVT_SD2SI>,
1529 XD, VEX, VEX_LIG;
1530 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1531 "cvttsd2si\t{$src, $dst|$dst, $src}",
1532 SSE_CVT_SD2SI>,
1533 XD, VEX, VEX_W, VEX_LIG;
1535 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1536 (VCVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1537 def : InstAlias<"vcvttss2si{l}\t{$src, $dst|$dst, $src}",
1538 (VCVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1539 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1540 (VCVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1541 def : InstAlias<"vcvttsd2si{l}\t{$src, $dst|$dst, $src}",
1542 (VCVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1543 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1544 (VCVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1545 def : InstAlias<"vcvttss2si{q}\t{$src, $dst|$dst, $src}",
1546 (VCVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1547 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1548 (VCVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1549 def : InstAlias<"vcvttsd2si{q}\t{$src, $dst|$dst, $src}",
1550 (VCVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1551 }
1552 // The assembler can recognize rr 64-bit instructions by seeing an rxx
1553 // register, but the same isn't true when only using memory operands, so
1554 // explicit "l" and "q" suffixed assembly forms are provided to disambiguate
1555 // where appropriate (see the example below).
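// For example (AT&T syntax), the register form is self-describing:
//   vcvtsi2ss %rax, %xmm1, %xmm0     # 64-bit source, inferred from %rax
// but a memory source alone doesn't give the operand size, hence the suffixes:
//   vcvtsi2ssl (%rax), %xmm1, %xmm0  # 32-bit integer load
//   vcvtsi2ssq (%rax), %xmm1, %xmm0  # 64-bit integer load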
1556 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}">,
1557 XS, VEX_4V, VEX_LIG;
1558 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">,
1559 XS, VEX_4V, VEX_W, VEX_LIG;
1560 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">,
1561 XD, VEX_4V, VEX_LIG;
1562 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">,
1563 XD, VEX_4V, VEX_W, VEX_LIG;
1565 let Predicates = [UseAVX] in {
1566 def : InstAlias<"vcvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1567                 (VCVTSI2SSrm FR32:$dst, FR32:$src1, i32mem:$src), 0>;
1568 def : InstAlias<"vcvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1569 (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src), 0>;
1571 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
1572 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
1573 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
1574 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
1575 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
1576 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
1577 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
1578 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
1580 def : Pat<(f32 (sint_to_fp GR32:$src)),
1581 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
1582 def : Pat<(f32 (sint_to_fp GR64:$src)),
1583 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
1584 def : Pat<(f64 (sint_to_fp GR32:$src)),
1585 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
1586 def : Pat<(f64 (sint_to_fp GR64:$src)),
1587 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
1588 }
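// Note on the patterns above: the (IMPLICIT_DEF) supplies the $src1
// pass-through operand required by the three-operand VEX encoding; a plain
// (f32 (sint_to_fp GR32:$src)) has no natural first source, so an undef
// register is used for it.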
1590 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
1591 "cvttss2si\t{$src, $dst|$dst, $src}",
1592 SSE_CVT_SS2SI_32>, XS;
1593 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
1594 "cvttss2si\t{$src, $dst|$dst, $src}",
1595 SSE_CVT_SS2SI_64>, XS, REX_W;
1596 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
1597 "cvttsd2si\t{$src, $dst|$dst, $src}",
1598 SSE_CVT_SD2SI>, XD;
1599 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
1600 "cvttsd2si\t{$src, $dst|$dst, $src}",
1601 SSE_CVT_SD2SI>, XD, REX_W;
1602 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
1603 "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
1604 SSE_CVT_Scalar>, XS;
1605 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
1606 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
1607 SSE_CVT_Scalar>, XS, REX_W;
1608 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
1609 "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
1610 SSE_CVT_Scalar>, XD;
1611 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
1612 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
1613 SSE_CVT_Scalar>, XD, REX_W;
1615 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1616 (CVTTSS2SIrr GR32:$dst, FR32:$src), 0>;
1617 def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}",
1618 (CVTTSS2SIrm GR32:$dst, f32mem:$src), 0>;
1619 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1620 (CVTTSD2SIrr GR32:$dst, FR64:$src), 0>;
1621 def : InstAlias<"cvttsd2si{l}\t{$src, $dst|$dst, $src}",
1622 (CVTTSD2SIrm GR32:$dst, f64mem:$src), 0>;
1623 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1624 (CVTTSS2SI64rr GR64:$dst, FR32:$src), 0>;
1625 def : InstAlias<"cvttss2si{q}\t{$src, $dst|$dst, $src}",
1626 (CVTTSS2SI64rm GR64:$dst, f32mem:$src), 0>;
1627 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1628 (CVTTSD2SI64rr GR64:$dst, FR64:$src), 0>;
1629 def : InstAlias<"cvttsd2si{q}\t{$src, $dst|$dst, $src}",
1630 (CVTTSD2SI64rm GR64:$dst, f64mem:$src), 0>;
1632 def : InstAlias<"cvtsi2ss\t{$src, $dst|$dst, $src}",
1633                 (CVTSI2SSrm FR32:$dst, i32mem:$src), 0>;
1634 def : InstAlias<"cvtsi2sd\t{$src, $dst|$dst, $src}",
1635 (CVTSI2SDrm FR64:$dst, i32mem:$src), 0>;
1637 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
1638 // and/or XMM operand(s).
1640 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
1641 Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
1642 string asm, OpndItins itins> {
1643 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
1644 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1645 [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
1646 Sched<[itins.Sched]>;
1647 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
1648 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1649 [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
1650 Sched<[itins.Sched.Folded]>;
1651 }
1653 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
1654 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
1655 PatFrag ld_frag, string asm, OpndItins itins,
1656 bit Is2Addr = 1> {
1657 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
1658 !if(Is2Addr,
1659 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1660 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1661 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
1662 itins.rr>, Sched<[itins.Sched]>;
1663 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
1664 (ins DstRC:$src1, x86memop:$src2),
1665 !if(Is2Addr,
1666 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
1667 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
1668 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
1669 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
1670 }
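// With Is2Addr = 1 (the SSE defs below, tied via "$src1 = $dst"), the !if
// above picks the two-operand string, e.g. "cvtsi2ss{l}\t{$src2, $dst|...}";
// with Is2Addr = 0 (the AVX defs) it picks the three-operand
// "cvtsi2ss{l}\t{$src2, $src1, $dst|...}" form instead.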
1672 let Predicates = [UseAVX] in {
1673 defm VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32,
1674 int_x86_sse2_cvtsd2si, sdmem, sse_load_f64, "cvtsd2si",
1675 SSE_CVT_SD2SI>, XD, VEX, VEX_LIG;
1676 defm VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
1677 int_x86_sse2_cvtsd2si64, sdmem, sse_load_f64, "cvtsd2si",
1678 SSE_CVT_SD2SI>, XD, VEX, VEX_W, VEX_LIG;
1679 }
1680 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
1681 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD;
1682 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
1683 sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1686 let isCodeGenOnly = 1 in {
1687 let Predicates = [UseAVX] in {
1688 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1689 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
1690 SSE_CVT_Scalar, 0>, XS, VEX_4V;
1691 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1692 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
1693 SSE_CVT_Scalar, 0>, XS, VEX_4V,
1694 VEX_W;
1695 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1696 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
1697 SSE_CVT_Scalar, 0>, XD, VEX_4V;
1698 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1699 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
1700 SSE_CVT_Scalar, 0>, XD,
1701 VEX_4V, VEX_W;
1702 }
1703 let Constraints = "$src1 = $dst" in {
1704 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1705 int_x86_sse_cvtsi2ss, i32mem, loadi32,
1706 "cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
1707 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1708 int_x86_sse_cvtsi642ss, i64mem, loadi64,
1709 "cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
1710 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
1711 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
1712 "cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
1713 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
1714 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
1715 "cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
1716 }
1717 } // isCodeGenOnly = 1
1719 /// SSE 1 Only
1721 // Aliases for intrinsics
1722 let isCodeGenOnly = 1 in {
1723 let Predicates = [UseAVX] in {
1724 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1725 ssmem, sse_load_f32, "cvttss2si",
1726 SSE_CVT_SS2SI_32>, XS, VEX;
1727 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1728 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1729 "cvttss2si", SSE_CVT_SS2SI_64>,
1730 XS, VEX, VEX_W;
1731 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1732 sdmem, sse_load_f64, "cvttsd2si",
1733 SSE_CVT_SD2SI>, XD, VEX;
1734 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1735 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1736 "cvttsd2si", SSE_CVT_SD2SI>,
1737 XD, VEX, VEX_W;
1738 }
1739 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
1740 ssmem, sse_load_f32, "cvttss2si",
1741 SSE_CVT_SS2SI_32>, XS;
1742 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1743 int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
1744 "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
1745 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
1746 sdmem, sse_load_f64, "cvttsd2si",
1747 SSE_CVT_SD2SI>, XD;
1748 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
1749 int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
1750 "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
1751 } // isCodeGenOnly = 1
1753 let Predicates = [UseAVX] in {
1754 defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1755 ssmem, sse_load_f32, "cvtss2si",
1756 SSE_CVT_SS2SI_32>, XS, VEX, VEX_LIG;
1757 defm VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1758 ssmem, sse_load_f32, "cvtss2si",
1759 SSE_CVT_SS2SI_64>, XS, VEX, VEX_W, VEX_LIG;
1760 }
1761 defm CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
1762 ssmem, sse_load_f32, "cvtss2si",
1763 SSE_CVT_SS2SI_32>, XS;
1764 defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
1765 ssmem, sse_load_f32, "cvtss2si",
1766 SSE_CVT_SS2SI_64>, XS, REX_W;
1768 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1769 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1770 SSEPackedSingle, SSE_CVT_PS>,
1771 PS, VEX, Requires<[HasAVX]>;
1772 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
1773 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1774 SSEPackedSingle, SSE_CVT_PS>,
1775 PS, VEX, VEX_L, Requires<[HasAVX]>;
1777 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
1778 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1779 SSEPackedSingle, SSE_CVT_PS>,
1780 PS, Requires<[UseSSE2]>;
1782 let Predicates = [UseAVX] in {
1783 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1784 (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1785 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
1786 (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1787 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1788 (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1789 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
1790 (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1791 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1792 (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1793 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
1794 (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1795 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1796 (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1797 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
1798 (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1799 }
1801 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1802 (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
1803 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
1804 (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
1805 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1806 (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
1807 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
1808 (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
1809 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1810 (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
1811 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
1812 (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
1813 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1814 (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
1815 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
1816                 (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
1818 /// SSE 2 Only
1820 // Convert scalar double to scalar single
1821 let hasSideEffects = 0, Predicates = [UseAVX] in {
1822 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
1823 (ins FR64:$src1, FR64:$src2),
1824 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
1825 IIC_SSE_CVT_Scalar_RR>, VEX_4V, VEX_LIG,
1826 Sched<[WriteCvtF2F]>;
1827 let mayLoad = 1 in
1828 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
1829 (ins FR64:$src1, f64mem:$src2),
1830 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1831 [], IIC_SSE_CVT_Scalar_RM>,
1832 XD, Requires<[HasAVX, OptForSize]>, VEX_4V, VEX_LIG,
1833 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1834 }
1836 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
1837 Requires<[UseAVX]>;
1839 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
1840 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1841 [(set FR32:$dst, (fround FR64:$src))],
1842 IIC_SSE_CVT_Scalar_RR>, Sched<[WriteCvtF2F]>;
1843 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
1844 "cvtsd2ss\t{$src, $dst|$dst, $src}",
1845 [(set FR32:$dst, (fround (loadf64 addr:$src)))],
1846 IIC_SSE_CVT_Scalar_RM>,
1847 XD,
1848 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1850 let isCodeGenOnly = 1 in {
1851 def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
1852 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1853 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1854 [(set VR128:$dst,
1855 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1856 IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, Requires<[UseAVX]>,
1857 Sched<[WriteCvtF2F]>;
1858 def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
1859 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1860 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1861 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1862 VR128:$src1, sse_load_f64:$src2))],
1863 IIC_SSE_CVT_Scalar_RM>, XD, VEX_4V, Requires<[UseAVX]>,
1864 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1866 let Constraints = "$src1 = $dst" in {
1867 def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
1868 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1869 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1870 [(set VR128:$dst,
1871 (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
1872 IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
1873 Sched<[WriteCvtF2F]>;
1874 def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
1875 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
1876 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
1877 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
1878 VR128:$src1, sse_load_f64:$src2))],
1879 IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
1880 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1881 }
1882 } // isCodeGenOnly = 1
1884 // Convert scalar single to scalar double
1885 // SSE2 instructions with XS prefix
1886 let hasSideEffects = 0, Predicates = [UseAVX] in {
1887 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
1888 (ins FR32:$src1, FR32:$src2),
1889 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1890 [], IIC_SSE_CVT_Scalar_RR>,
1891 XS, Requires<[HasAVX]>, VEX_4V, VEX_LIG,
1892 Sched<[WriteCvtF2F]>;
1893 let mayLoad = 1 in
1894 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
1895 (ins FR32:$src1, f32mem:$src2),
1896 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1897 [], IIC_SSE_CVT_Scalar_RM>,
1898 XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>,
1899 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1900 }
1902 def : Pat<(f64 (fextend FR32:$src)),
1903 (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[UseAVX]>;
1904 def : Pat<(fextend (loadf32 addr:$src)),
1905 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[UseAVX]>;
1907 def : Pat<(extloadf32 addr:$src),
1908 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>,
1909 Requires<[UseAVX, OptForSize]>;
1910 def : Pat<(extloadf32 addr:$src),
1911 (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>,
1912 Requires<[UseAVX, OptForSpeed]>;
1914 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
1915 "cvtss2sd\t{$src, $dst|$dst, $src}",
1916 [(set FR64:$dst, (fextend FR32:$src))],
1917 IIC_SSE_CVT_Scalar_RR>, XS,
1918 Requires<[UseSSE2]>, Sched<[WriteCvtF2F]>;
1919 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1920 "cvtss2sd\t{$src, $dst|$dst, $src}",
1921 [(set FR64:$dst, (extloadf32 addr:$src))],
1922 IIC_SSE_CVT_Scalar_RM>, XS,
1923 Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
1925 // extload f32 -> f64. This matches load+fextend because we have a hack in
1926 // the isel (PreprocessForFPConvert) that can introduce loads after dag
1927 // combine.
1928 // Since these loads aren't folded into the fextend, we have to match it
1929 // explicitly here.
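// That is, after dag combine either shape may survive (a sketch):
//   (f64 (fextend (loadf32 %p)))   // load introduced late, not folded
//   (f64 (extloadf32 %p))          // the usual folded form
// so both patterns are matched here.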
1930 def : Pat<(fextend (loadf32 addr:$src)),
1931 (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>;
1932 def : Pat<(extloadf32 addr:$src),
1933 (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
1935 let isCodeGenOnly = 1 in {
1936 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1937 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1938 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1939 [(set VR128:$dst,
1940 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1941 IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, Requires<[UseAVX]>,
1942 Sched<[WriteCvtF2F]>;
1943 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1944 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1945 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1946 [(set VR128:$dst,
1947 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1948 IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, Requires<[UseAVX]>,
1949 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1950 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1951 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1952 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1953 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1954 [(set VR128:$dst,
1955 (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
1956 IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
1957 Sched<[WriteCvtF2F]>;
1958 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1959 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
1960 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1961 [(set VR128:$dst,
1962 (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))],
1963 IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
1964 Sched<[WriteCvtF2FLd, ReadAfterLd]>;
1965 }
1966 } // isCodeGenOnly = 1
1968 // Convert packed single/double fp to doubleword
1969 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1970 "cvtps2dq\t{$src, $dst|$dst, $src}",
1971 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1972 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
1973 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1974 "cvtps2dq\t{$src, $dst|$dst, $src}",
1975 [(set VR128:$dst,
1976 (int_x86_sse2_cvtps2dq (loadv4f32 addr:$src)))],
1977 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
1978 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1979 "cvtps2dq\t{$src, $dst|$dst, $src}",
1980 [(set VR256:$dst,
1981 (int_x86_avx_cvt_ps2dq_256 VR256:$src))],
1982 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
1983 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1984 "cvtps2dq\t{$src, $dst|$dst, $src}",
1985 [(set VR256:$dst,
1986 (int_x86_avx_cvt_ps2dq_256 (loadv8f32 addr:$src)))],
1987 IIC_SSE_CVT_PS_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
1988 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1989 "cvtps2dq\t{$src, $dst|$dst, $src}",
1990 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))],
1991 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
1992 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1993 "cvtps2dq\t{$src, $dst|$dst, $src}",
1994 [(set VR128:$dst,
1995 (int_x86_sse2_cvtps2dq (memopv4f32 addr:$src)))],
1996 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
1999 // Convert Packed Double FP to Packed DW Integers
2000 let Predicates = [HasAVX] in {
2001 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2002 // register, but the same isn't true when using memory operands instead.
2003 // Provide other assembly rr and rm forms to address this explicitly.
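// For example, "vcvtpd2dq %ymm0, %xmm0" is unambiguous, but from
// "vcvtpd2dq (%rax), %xmm0" alone the assembler can't tell a 128-bit load
// from a 256-bit one, so the suffixed spellings below disambiguate:
//   vcvtpd2dqx (%rax), %xmm0   # 128-bit (xmm-sized) memory source
//   vcvtpd2dqy (%rax), %xmm0   # 256-bit (ymm-sized) memory source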
2004 def VCVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2005 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
2006 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
2007 VEX, Sched<[WriteCvtF2I]>;
2009 // XMM only
2010 def : InstAlias<"vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2011 (VCVTPD2DQrr VR128:$dst, VR128:$src), 0>;
2012 def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2013 "vcvtpd2dqx\t{$src, $dst|$dst, $src}",
2014 [(set VR128:$dst,
2015 (int_x86_sse2_cvtpd2dq (loadv2f64 addr:$src)))]>, VEX,
2016 Sched<[WriteCvtF2ILd]>;
2018 // YMM only
2019 def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2020 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2021 [(set VR128:$dst,
2022 (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L,
2023 Sched<[WriteCvtF2I]>;
2024 def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2025 "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}",
2026 [(set VR128:$dst,
2027 (int_x86_avx_cvt_pd2dq_256 (loadv4f64 addr:$src)))]>,
2028 VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2029 def : InstAlias<"vcvtpd2dq\t{$src, $dst|$dst, $src}",
2030 (VCVTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2031 }
2033 def CVTPD2DQrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2034 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2035 [(set VR128:$dst,
2036 (int_x86_sse2_cvtpd2dq (memopv2f64 addr:$src)))],
2037 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2ILd]>;
2038 def CVTPD2DQrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2039 "cvtpd2dq\t{$src, $dst|$dst, $src}",
2040 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))],
2041 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2043 // Convert with truncation packed single/double fp to doubleword
2044 // SSE2 packed instructions with XS prefix
2045 def VCVTTPS2DQrr : VS2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2046 "cvttps2dq\t{$src, $dst|$dst, $src}",
2047 [(set VR128:$dst,
2048 (int_x86_sse2_cvttps2dq VR128:$src))],
2049 IIC_SSE_CVT_PS_RR>, VEX, Sched<[WriteCvtF2I]>;
2050 def VCVTTPS2DQrm : VS2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2051 "cvttps2dq\t{$src, $dst|$dst, $src}",
2052 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
2053 (loadv4f32 addr:$src)))],
2054 IIC_SSE_CVT_PS_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2055 def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2056 "cvttps2dq\t{$src, $dst|$dst, $src}",
2057 [(set VR256:$dst,
2058 (int_x86_avx_cvtt_ps2dq_256 VR256:$src))],
2059 IIC_SSE_CVT_PS_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2060 def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2061 "cvttps2dq\t{$src, $dst|$dst, $src}",
2062 [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256
2063 (loadv8f32 addr:$src)))],
2064 IIC_SSE_CVT_PS_RM>, VEX, VEX_L,
2065 Sched<[WriteCvtF2ILd]>;
2067 def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2068 "cvttps2dq\t{$src, $dst|$dst, $src}",
2069 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))],
2070 IIC_SSE_CVT_PS_RR>, Sched<[WriteCvtF2I]>;
2071 def CVTTPS2DQrm : S2SI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2072 "cvttps2dq\t{$src, $dst|$dst, $src}",
2073 [(set VR128:$dst,
2074 (int_x86_sse2_cvttps2dq (memopv4f32 addr:$src)))],
2075 IIC_SSE_CVT_PS_RM>, Sched<[WriteCvtF2ILd]>;
2077 let Predicates = [HasAVX] in {
2078 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2079 (VCVTDQ2PSrr VR128:$src)>;
2080 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2081 (VCVTDQ2PSrm addr:$src)>;
2083 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2084 (VCVTDQ2PSrr VR128:$src)>;
2085 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (loadv2i64 addr:$src))),
2086 (VCVTDQ2PSrm addr:$src)>;
2088 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2089 (VCVTTPS2DQrr VR128:$src)>;
2090 def : Pat<(v4i32 (fp_to_sint (loadv4f32 addr:$src))),
2091 (VCVTTPS2DQrm addr:$src)>;
2093 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
2094 (VCVTDQ2PSYrr VR256:$src)>;
2095 def : Pat<(v8f32 (sint_to_fp (bc_v8i32 (loadv4i64 addr:$src)))),
2096 (VCVTDQ2PSYrm addr:$src)>;
2098 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
2099 (VCVTTPS2DQYrr VR256:$src)>;
2100 def : Pat<(v8i32 (fp_to_sint (loadv8f32 addr:$src))),
2101 (VCVTTPS2DQYrm addr:$src)>;
2102 }
2104 let Predicates = [UseSSE2] in {
2105 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
2106 (CVTDQ2PSrr VR128:$src)>;
2107 def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))),
2108 (CVTDQ2PSrm addr:$src)>;
2110 def : Pat<(int_x86_sse2_cvtdq2ps VR128:$src),
2111 (CVTDQ2PSrr VR128:$src)>;
2112 def : Pat<(int_x86_sse2_cvtdq2ps (bc_v4i32 (memopv2i64 addr:$src))),
2113 (CVTDQ2PSrm addr:$src)>;
2115 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
2116 (CVTTPS2DQrr VR128:$src)>;
2117 def : Pat<(v4i32 (fp_to_sint (memopv4f32 addr:$src))),
2118 (CVTTPS2DQrm addr:$src)>;
2119 }
2121 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2122 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2123 [(set VR128:$dst,
2124 (int_x86_sse2_cvttpd2dq VR128:$src))],
2125 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2I]>;
2127 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2128 // register, but the same isn't true when using memory operands instead.
2129 // Provide other assembly rr and rm forms to address this explicitly.
2131 // XMM only
2132 def : InstAlias<"vcvttpd2dqx\t{$src, $dst|$dst, $src}",
2133 (VCVTTPD2DQrr VR128:$dst, VR128:$src), 0>;
2134 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2135 "cvttpd2dqx\t{$src, $dst|$dst, $src}",
2136 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2137 (loadv2f64 addr:$src)))],
2138 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2ILd]>;
2140 // YMM only
2141 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2142 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2143 [(set VR128:$dst,
2144 (int_x86_avx_cvtt_pd2dq_256 VR256:$src))],
2145 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2I]>;
2146 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2147 "cvttpd2dq{y}\t{$src, $dst|$dst, $src}",
2148 [(set VR128:$dst,
2149 (int_x86_avx_cvtt_pd2dq_256 (loadv4f64 addr:$src)))],
2150 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2ILd]>;
2151 def : InstAlias<"vcvttpd2dq\t{$src, $dst|$dst, $src}",
2152 (VCVTTPD2DQYrr VR128:$dst, VR256:$src), 0>;
2154 let Predicates = [HasAVX] in {
2155 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
2156 (VCVTTPD2DQYrr VR256:$src)>;
2157 def : Pat<(v4i32 (fp_to_sint (loadv4f64 addr:$src))),
2158 (VCVTTPD2DQYrm addr:$src)>;
2159 } // Predicates = [HasAVX]
2161 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2162 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2163 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))],
2164 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2I]>;
2165 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
2166 "cvttpd2dq\t{$src, $dst|$dst, $src}",
2167 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
2168 (memopv2f64 addr:$src)))],
2169 IIC_SSE_CVT_PD_RM>,
2170 Sched<[WriteCvtF2ILd]>;
2172 // Convert packed single to packed double
2173 let Predicates = [HasAVX] in {
2174 // SSE2 instructions without OpSize prefix
2175 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2176 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2177 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2178 IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
2179 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2180 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2181 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2182 IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
2183 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2184 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2185 [(set VR256:$dst,
2186 (int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
2187 IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2188 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
2189 "vcvtps2pd\t{$src, $dst|$dst, $src}",
2190 [(set VR256:$dst,
2191 (int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
2192 IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2193 }
2195 let Predicates = [UseSSE2] in {
2196 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2197 "cvtps2pd\t{$src, $dst|$dst, $src}",
2198 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
2199 IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
2200 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2201 "cvtps2pd\t{$src, $dst|$dst, $src}",
2202 [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
2203 IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
2204 }
2206 // Convert Packed DW Integers to Packed Double FP
2207 let Predicates = [HasAVX] in {
2208 let hasSideEffects = 0, mayLoad = 1 in
2209 def VCVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2210 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2211 []>, VEX, Sched<[WriteCvtI2FLd]>;
2212 def VCVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2213 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2214 [(set VR128:$dst,
2215 (int_x86_sse2_cvtdq2pd VR128:$src))]>, VEX,
2216 Sched<[WriteCvtI2F]>;
2217 def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src),
2218 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2219 [(set VR256:$dst,
2220 (int_x86_avx_cvtdq2_pd_256
2221 (bitconvert (loadv2i64 addr:$src))))]>, VEX, VEX_L,
2222 Sched<[WriteCvtI2FLd]>;
2223 def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
2224 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
2225 [(set VR256:$dst,
2226 (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L,
2227 Sched<[WriteCvtI2F]>;
2228 }
2230 let hasSideEffects = 0, mayLoad = 1 in
2231 def CVTDQ2PDrm : S2SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
2232 "cvtdq2pd\t{$src, $dst|$dst, $src}", [],
2233                    IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2FLd]>;
2234 def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2235 "cvtdq2pd\t{$src, $dst|$dst, $src}",
2236 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
2237                    IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtI2F]>;
2239 // AVX 256-bit register conversion intrinsics
2240 let Predicates = [HasAVX] in {
2241 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
2242 (VCVTDQ2PDYrr VR128:$src)>;
2243 def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
2244 (VCVTDQ2PDYrm addr:$src)>;
2245 } // Predicates = [HasAVX]
2247 // Convert packed double to packed single
2248 // The assembler can recognize rr 256-bit instructions by seeing a ymm
2249 // register, but the same isn't true when using memory operands instead.
2250 // Provide other assembly rr and rm forms to address this explicitly.
2251 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2252 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2253 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2254 IIC_SSE_CVT_PD_RR>, VEX, Sched<[WriteCvtF2F]>;
2256 // XMM only
2257 def : InstAlias<"vcvtpd2psx\t{$src, $dst|$dst, $src}",
2258 (VCVTPD2PSrr VR128:$dst, VR128:$src), 0>;
2259 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2260 "cvtpd2psx\t{$src, $dst|$dst, $src}",
2261 [(set VR128:$dst,
2262 (int_x86_sse2_cvtpd2ps (loadv2f64 addr:$src)))],
2263 IIC_SSE_CVT_PD_RM>, VEX, Sched<[WriteCvtF2FLd]>;
2265 // YMM only
2266 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
2267 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2268 [(set VR128:$dst,
2269 (int_x86_avx_cvt_pd2_ps_256 VR256:$src))],
2270 IIC_SSE_CVT_PD_RR>, VEX, VEX_L, Sched<[WriteCvtF2F]>;
2271 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
2272 "cvtpd2ps{y}\t{$src, $dst|$dst, $src}",
2273 [(set VR128:$dst,
2274 (int_x86_avx_cvt_pd2_ps_256 (loadv4f64 addr:$src)))],
2275 IIC_SSE_CVT_PD_RM>, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
2276 def : InstAlias<"vcvtpd2ps\t{$src, $dst|$dst, $src}",
2277 (VCVTPD2PSYrr VR128:$dst, VR256:$src), 0>;
2279 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2280 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2281 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))],
2282 IIC_SSE_CVT_PD_RR>, Sched<[WriteCvtF2F]>;
2283 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2284 "cvtpd2ps\t{$src, $dst|$dst, $src}",
2285 [(set VR128:$dst,
2286 (int_x86_sse2_cvtpd2ps (memopv2f64 addr:$src)))],
2287 IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtF2FLd]>;
2290 // AVX 256-bit register conversion intrinsics
2291 // FIXME: Migrate SSE conversion intrinsic matching to use patterns like those
2292 // below whenever possible, to avoid declaring two versions of each one.
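// In other words, keep a single instruction definition and add separate Pat<>
// records (like the two immediately below) that map the intrinsic onto it,
// rather than defining an intrinsic-only twin of each instruction.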
2293 let Predicates = [HasAVX] in {
2294 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
2295 (VCVTDQ2PSYrr VR256:$src)>;
2296 def : Pat<(int_x86_avx_cvtdq2_ps_256 (bitconvert (loadv4i64 addr:$src))),
2297 (VCVTDQ2PSYrm addr:$src)>;
2299 // Match fround and fextend for 128/256-bit conversions
2300 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2301 (VCVTPD2PSrr VR128:$src)>;
2302 def : Pat<(v4f32 (X86vfpround (loadv2f64 addr:$src))),
2303 (VCVTPD2PSXrm addr:$src)>;
2304 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
2305 (VCVTPD2PSYrr VR256:$src)>;
2306 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
2307 (VCVTPD2PSYrm addr:$src)>;
2309 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2310 (VCVTPS2PDrr VR128:$src)>;
2311 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
2312 (VCVTPS2PDYrr VR128:$src)>;
2313 def : Pat<(v4f64 (extloadv4f32 addr:$src)),
2314 (VCVTPS2PDYrm addr:$src)>;
2315 }
2317 let Predicates = [UseSSE2] in {
2318   // Match fround and fextend for 128-bit conversions
2319 def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))),
2320 (CVTPD2PSrr VR128:$src)>;
2321 def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))),
2322 (CVTPD2PSrm addr:$src)>;
2324 def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))),
2325 (CVTPS2PDrr VR128:$src)>;
2326 }
2328 //===----------------------------------------------------------------------===//
2329 // SSE 1 & 2 - Compare Instructions
2330 //===----------------------------------------------------------------------===//
2332 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
2333 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
2334 Operand CC, SDNode OpNode, ValueType VT,
2335 PatFrag ld_frag, string asm, string asm_alt,
2336 OpndItins itins> {
2337 def rr : SIi8<0xC2, MRMSrcReg,
2338 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2339 [(set RC:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
2340 itins.rr>, Sched<[itins.Sched]>;
2341 def rm : SIi8<0xC2, MRMSrcMem,
2342 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2343 [(set RC:$dst, (OpNode (VT RC:$src1),
2344 (ld_frag addr:$src2), imm:$cc))],
2345 itins.rm>,
2346 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2348 // Accept explicit immediate argument form instead of comparison code.
2349 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2350 def rr_alt : SIi8<0xC2, MRMSrcReg, (outs RC:$dst),
2351 (ins RC:$src1, RC:$src2, i8imm:$cc), asm_alt, [],
2352 IIC_SSE_ALU_F32S_RR>, Sched<[itins.Sched]>;
2353 let mayLoad = 1 in
2354 def rm_alt : SIi8<0xC2, MRMSrcMem, (outs RC:$dst),
2355 (ins RC:$src1, x86memop:$src2, i8imm:$cc), asm_alt, [],
2356 IIC_SSE_ALU_F32S_RM>,
2357 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2358 }
2359 }
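// For example, with $cc = 0 (EQ) the primary forms print "cmpeqss", while the
// *_alt asm-parser-only forms also accept the raw immediate spelling, e.g.
//   cmpss $0, %xmm1, %xmm0   # assembles identically to "cmpeqss %xmm1, %xmm0"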
2361 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem, AVXCC, X86cmps, f32, loadf32,
2362 "cmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2363 "cmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2364 SSE_ALU_F32S>,
2365 XS, VEX_4V, VEX_LIG;
2366 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem, AVXCC, X86cmps, f64, loadf64,
2367 "cmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2368 "cmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2369                   SSE_ALU_F32S>, // same latency as 32-bit compare
2370 XD, VEX_4V, VEX_LIG;
2372 let Constraints = "$src1 = $dst" in {
2373 defm CMPSS : sse12_cmp_scalar<FR32, f32mem, SSECC, X86cmps, f32, loadf32,
2374 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
2375 "cmpss\t{$cc, $src2, $dst|$dst, $src2, $cc}", SSE_ALU_F32S>,
2376 XS;
2377 defm CMPSD : sse12_cmp_scalar<FR64, f64mem, SSECC, X86cmps, f64, loadf64,
2378 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
2379 "cmpsd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2380 SSE_ALU_F64S>,
2381 XD;
2382 }
2384 multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
2385 Intrinsic Int, string asm, OpndItins itins> {
2386 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
2387 (ins VR128:$src1, VR128:$src, CC:$cc), asm,
2388 [(set VR128:$dst, (Int VR128:$src1,
2389 VR128:$src, imm:$cc))],
2390 itins.rr>,
2391 Sched<[itins.Sched]>;
2392 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
2393 (ins VR128:$src1, x86memop:$src, CC:$cc), asm,
2394 [(set VR128:$dst, (Int VR128:$src1,
2395 (load addr:$src), imm:$cc))],
2396 itins.rm>,
2397 Sched<[itins.Sched.Folded, ReadAfterLd]>;
2398 }
2400 let isCodeGenOnly = 1 in {
2401 // Aliases to match intrinsics which expect XMM operand(s).
2402 defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
2403 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
2404 SSE_ALU_F32S>,
2405 XS, VEX_4V;
2406 defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
2407 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
2408 SSE_ALU_F32S>, // same latency as f32
2409 XD, VEX_4V;
2410 let Constraints = "$src1 = $dst" in {
2411 defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
2412 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
2413 SSE_ALU_F32S>, XS;
2414 defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
2415 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
2416 SSE_ALU_F64S>,
2417 XD;
2418 }
2419 }
2422 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
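// Both the ordered and unordered flavors compare two scalars and set ZF/PF/CF
// (an unordered result sets all three), so e.g. "ucomiss %xmm1, %xmm0" is
// typically followed by ja/jae/jne-style branches on those flags.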
2423 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
2424 ValueType vt, X86MemOperand x86memop,
2425 PatFrag ld_frag, string OpcodeStr> {
2426 def rr: SI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
2427 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2428 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))],
2429 IIC_SSE_COMIS_RR>,
2430 Sched<[WriteFAdd]>;
2431 def rm: SI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
2432 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
2433 [(set EFLAGS, (OpNode (vt RC:$src1),
2434 (ld_frag addr:$src2)))],
2435 IIC_SSE_COMIS_RM>,
2436 Sched<[WriteFAddLd, ReadAfterLd]>;
2437 }
2439 let Defs = [EFLAGS] in {
2440 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2441 "ucomiss">, PS, VEX, VEX_LIG;
2442 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2443 "ucomisd">, PD, VEX, VEX_LIG;
2444 let Pattern = []<dag> in {
2445 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2446 "comiss">, PS, VEX, VEX_LIG;
2447 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2448 "comisd">, PD, VEX, VEX_LIG;
2449 }
2451 let isCodeGenOnly = 1 in {
2452 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2453 load, "ucomiss">, PS, VEX;
2454 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2455 load, "ucomisd">, PD, VEX;
2457 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
2458 load, "comiss">, PS, VEX;
2459 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
2460 load, "comisd">, PD, VEX;
2461 }
2462 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
2463 "ucomiss">, PS;
2464 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
2465 "ucomisd">, PD;
2467 let Pattern = []<dag> in {
2468 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
2469 "comiss">, PS;
2470 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
2471 "comisd">, PD;
2472 }
2474 let isCodeGenOnly = 1 in {
2475 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
2476 load, "ucomiss">, PS;
2477 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
2478 load, "ucomisd">, PD;
2480 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
2481 "comiss">, PS;
2482 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
2483 "comisd">, PD;
2484 }
2485 } // Defs = [EFLAGS]
2487 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
2488 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
2489 Operand CC, Intrinsic Int, string asm,
2490 string asm_alt, Domain d,
2491 OpndItins itins = SSE_ALU_F32P> {
2492 def rri : PIi8<0xC2, MRMSrcReg,
2493 (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
2494 [(set RC:$dst, (Int RC:$src1, RC:$src2, imm:$cc))],
2495 itins.rr, d>,
2496 Sched<[WriteFAdd]>;
2497 def rmi : PIi8<0xC2, MRMSrcMem,
2498 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
2499 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2), imm:$cc))],
2500 itins.rm, d>,
2501 Sched<[WriteFAddLd, ReadAfterLd]>;
2503 // Accept explicit immediate argument form instead of comparison code.
2504 let isAsmParserOnly = 1, hasSideEffects = 0 in {
2505 def rri_alt : PIi8<0xC2, MRMSrcReg,
2506 (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
2507 asm_alt, [], itins.rr, d>, Sched<[WriteFAdd]>;
2508 def rmi_alt : PIi8<0xC2, MRMSrcMem,
2509 (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
2510 asm_alt, [], itins.rm, d>,
2511 Sched<[WriteFAddLd, ReadAfterLd]>;
2512 }
2513 }
2515 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
2516 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2517 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2518 SSEPackedSingle>, PS, VEX_4V;
2519 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
2520 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2521 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2522 SSEPackedDouble>, PD, VEX_4V;
2523 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
2524 "cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2525 "cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2526 SSEPackedSingle>, PS, VEX_4V, VEX_L;
2527 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
2528 "cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2529 "cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
2530 SSEPackedDouble>, PD, VEX_4V, VEX_L;
2531 let Constraints = "$src1 = $dst" in {
2532 defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
2533 "cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
2534 "cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2535 SSEPackedSingle, SSE_ALU_F32P>, PS;
2536 defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
2537 "cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
2538 "cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
2539 SSEPackedDouble, SSE_ALU_F64P>, PD;
2540 }
2542 let Predicates = [HasAVX] in {
2543 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2544 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2545 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2546 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2547 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2548 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2549 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2550 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2552 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
2553 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
2554 def : Pat<(v8i32 (X86cmpp (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
2555 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
2556 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
2557 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
2558 def : Pat<(v4i64 (X86cmpp (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
2559 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
2560 }
2562 let Predicates = [UseSSE1] in {
2563 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
2564 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
2565 def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
2566 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
2567 }
2569 let Predicates = [UseSSE2] in {
2570 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
2571 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
2572 def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
2573 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
2574 }
2576 //===----------------------------------------------------------------------===//
2577 // SSE 1 & 2 - Shuffle Instructions
2578 //===----------------------------------------------------------------------===//
2580 /// sse12_shuffle - sse 1 & 2 fp shuffle instructions
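// For v4f32 the imm8 ($src3) holds four 2-bit lane selectors: the low two pick
// dst[0] and dst[1] from $src1, the high two pick dst[2] and dst[3] from
// $src2. E.g. "shufps $0, %xmm1, %xmm0" broadcasts element 0 of each source
// into its half of the result.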
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
                 [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                     (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
            Sched<[WriteFShuffleLd, ReadAfterLd]>;
  def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
                 [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                     (i8 imm:$src3))))], IIC_SSE_SHUFP, d>,
            Sched<[WriteFShuffle]>;
}

defm VSHUFPS  : sse12_shuffle<VR128, f128mem, v4f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv4f32, SSEPackedSingle>, PS, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VSHUFPD  : sse12_shuffle<VR128, f128mem, v2f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv2f64, SSEPackedDouble>, PD, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           loadv4f64, SSEPackedDouble>, PD, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                    "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle>, PS;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                    "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble>, PD;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Shufp VR128:$src1,
                    (bc_v4i32 (loadv2i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

  def : Pat<(v2i64 (X86Shufp VR128:$src1,
                    (loadv2i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

  // 256-bit patterns
  def : Pat<(v8i32 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86Shufp VR256:$src1,
                    (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;

  def : Pat<(v4i64 (X86Shufp VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v4i64 (X86Shufp VR256:$src1,
                    (loadv4i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}

let Predicates = [UseSSE1] in {
  def : Pat<(v4i32 (X86Shufp VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
            (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

let Predicates = [UseSSE2] in {
  // Generic SHUFPD patterns
  def : Pat<(v2i64 (X86Shufp VR128:$src1,
                    (memopv2i64 addr:$src2), (i8 imm:$imm))),
            (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufp VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Unpack FP Instructions
//===----------------------------------------------------------------------===//

/// sse12_unpack_interleave - sse 1 & 2 fp unpack and interleave
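///
/// A rough sketch of the 128-bit single-precision semantics:
///   unpcklps: dst = { src1[0], src2[0], src1[1], src2[1] }
///   unpckhps: dst = { src1[2], src2[2], src1[3], src2[3] }
/// The 256-bit AVX forms perform the same interleave independently within
/// each 128-bit lane.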
multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
  def rr : PI<opc, MRMSrcReg,
              (outs RC:$dst), (ins RC:$src1, RC:$src2),
              asm, [(set RC:$dst,
                         (vt (OpNode RC:$src1, RC:$src2)))],
                         IIC_SSE_UNPCK, d>, Sched<[WriteFShuffle]>;
  def rm : PI<opc, MRMSrcMem,
              (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              asm, [(set RC:$dst,
                         (vt (OpNode RC:$src1,
                                     (mem_frag addr:$src2))))],
                         IIC_SSE_UNPCK, d>,
              Sched<[WriteFShuffleLd, ReadAfterLd]>;
}

defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
      VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
      VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, VEX_4V;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
      VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
      VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, VEX_4V;

defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
      VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
      VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, VEX_4V, VEX_L;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
      VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
      VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      SSEPackedDouble>, PD, VEX_4V, VEX_L;

let Constraints = "$src1 = $dst" in {
  defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
        VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
        SSEPackedSingle>, PS;
  defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
        VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
        SSEPackedDouble>, PD;
  defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
        VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
        SSEPackedSingle>, PS;
  defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
        VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
        SSEPackedDouble>, PD;
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX1Only] in {
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
            (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
            (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v4i64 (X86Unpckl VR256:$src1, (loadv4i64 addr:$src2))),
            (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)),
            (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, (loadv4i64 addr:$src2))),
            (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
}

let Predicates = [HasAVX] in {
  // FIXME: Instead of X86Movddup, there should be an X86Unpckl here; the
  // problem is during lowering, where it is not possible to recognize the
  // load fold because the load has two uses through a bitcast. One use
  // disappears at isel time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (VUNPCKLPDrr VR128:$src, VR128:$src)>;
}

let Predicates = [UseSSE2] in {
  // FIXME: Instead of X86Movddup, there should be an X86Unpckl here; the
  // problem is during lowering, where it is not possible to recognize the
  // load fold because the load has two uses through a bitcast. One use
  // disappears at isel time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (UNPCKLPDrr VR128:$src, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//

/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
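///
/// A rough sketch of what movmskps computes:
///   dst bit i = sign bit of fp element i of $src (i = 0..3);
///   all remaining bits of the GPR result are zeroed.
/// movmskpd does the same for the two double-precision elements.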
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
  def rr : PI<0x50, MRMSrcReg, (outs GR32orGR64:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set GR32orGR64:$dst, (Int RC:$src))], IIC_SSE_MOVMSK, d>,
              Sched<[WriteVecLogic]>;
}

let Predicates = [HasAVX] in {
  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
                                        "movmskps", SSEPackedSingle>, PS, VEX;
  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                        "movmskpd", SSEPackedDouble>, PD, VEX;
  defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
                                         "movmskps", SSEPackedSingle>, PS,
                                         VEX, VEX_L;
  defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
                                         "movmskpd", SSEPackedDouble>, PD,
                                         VEX, VEX_L;

  def : Pat<(i32 (X86fgetsign FR32:$src)),
            (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(i64 (X86fgetsign FR32:$src)),
            (SUBREG_TO_REG (i64 0),
             (VMOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>;
  def : Pat<(i32 (X86fgetsign FR64:$src)),
            (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(i64 (X86fgetsign FR64:$src)),
            (SUBREG_TO_REG (i64 0),
             (VMOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>;
}

defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
                                     SSEPackedSingle>, PS;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
                                     SSEPackedDouble>, PD;

def : Pat<(i32 (X86fgetsign FR32:$src)),
          (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128))>,
      Requires<[UseSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
          (SUBREG_TO_REG (i64 0),
           (MOVMSKPSrr (COPY_TO_REGCLASS FR32:$src, VR128)), sub_32bit)>,
      Requires<[UseSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
          (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128))>,
      Requires<[UseSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
          (SUBREG_TO_REG (i64 0),
           (MOVMSKPDrr (COPY_TO_REGCLASS FR64:$src, VR128)), sub_32bit)>,
      Requires<[UseSSE2]>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
                        X86MemOperand x86memop, OpndItins itins,
                        bit IsCommutable, bit Is2Addr> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
       Sched<[itins.Sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpVT (OpNode RC:$src1,
                                    (bitconvert (memop_frag addr:$src2)))))],
                                    itins.rm>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
} // ExeDomain = SSEPackedInt

multiclass PDI_binop_all<bits<8> opc, string OpcodeStr, SDNode Opcode,
                         ValueType OpVT128, ValueType OpVT256,
                         OpndItins itins, bit IsCommutable = 0> {
let Predicates = [HasAVX] in
  defm V#NAME : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode, OpVT128,
                             VR128, loadv2i64, i128mem, itins,
                             IsCommutable, 0>, VEX_4V;

let Constraints = "$src1 = $dst" in
  defm NAME : PDI_binop_rm<opc, OpcodeStr, Opcode, OpVT128, VR128,
                           memopv2i64, i128mem, itins, IsCommutable, 1>;

let Predicates = [HasAVX2] in
  defm V#NAME#Y : PDI_binop_rm<opc, !strconcat("v", OpcodeStr), Opcode,
                               OpVT256, VR256, loadv4i64, i256mem, itins,
                               IsCommutable, 0>, VEX_4V, VEX_L;
}

// These are ordered here for pattern ordering requirements with the fp versions

defm PAND  : PDI_binop_all<0xDB, "pand", and, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 1>;
defm POR   : PDI_binop_all<0xEB, "por", or, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 1>;
defm PXOR  : PDI_binop_all<0xEF, "pxor", xor, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 1>;
defm PANDN : PDI_binop_all<0xDF, "pandn", X86andnp, v2i64, v4i64,
                           SSE_VEC_BIT_ITINS_P, 0>;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//

/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode, OpndItins itins> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
              PS, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
              FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
              PD, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
                PS;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
                PD;
  }
}

// Alias bitwise logical operations using SSE logical ops on packed FP values.
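// (These Fs* forms let scalar FP logic on FR32/FR64 -- for example the
// sign-bit masking typically produced when lowering fabs/fneg -- reuse the
// packed instructions, since x86 has no scalar FP logical instructions.)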
let isCodeGenOnly = 1 in {
  defm FsAND  : sse12_fp_alias_pack_logical<0x54, "and", X86fand,
                SSE_BIT_ITINS_P>;
  defm FsOR   : sse12_fp_alias_pack_logical<0x56, "or", X86for,
                SSE_BIT_ITINS_P>;
  defm FsXOR  : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor,
                SSE_BIT_ITINS_P>;

  let isCommutable = 0 in
    defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", X86fandn,
                  SSE_BIT_ITINS_P>;
}

/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  defm V#NAME#PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
        !strconcat(OpcodeStr, "ps"), f256mem,
        [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
        [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
                           (loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;

  defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
        !strconcat(OpcodeStr, "pd"), f256mem,
        [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                  (bc_v4i64 (v4f64 VR256:$src2))))],
        [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                  (loadv4i64 addr:$src2)))], 0>,
                                  PD, VEX_4V, VEX_L;

  // In AVX there is no need to add a pattern for 128-bit logical rr ps,
  // because they are all promoted to v2i64 and the patterns are covered by
  // the int version. The pattern is needed only for SSE, because v2i64 is
  // supported on SSE2 but not on SSE1.
  defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
       !strconcat(OpcodeStr, "ps"), f128mem, [],
       [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                 (loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
       !strconcat(OpcodeStr, "pd"), f128mem,
       [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                 (bc_v2i64 (v2f64 VR128:$src2))))],
       [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                 (loadv2i64 addr:$src2)))], 0>,
                                 PD, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
         !strconcat(OpcodeStr, "ps"), f128mem,
         [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
         [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                   (memopv2i64 addr:$src2)))]>, PS;

    defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
         !strconcat(OpcodeStr, "pd"), f128mem,
         [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                   (bc_v2i64 (v2f64 VR128:$src2))))],
         [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                   (memopv2i64 addr:$src2)))]>, PD;
  }
}

defm AND  : sse12_fp_packed_logical<0x54, "and", and>;
defm OR   : sse12_fp_packed_logical<0x56, "or", or>;
defm XOR  : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
  defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;

// AVX1 requires type coercions in order to fold loads directly into logical
// operations.
let Predicates = [HasAVX1Only] in {
  def : Pat<(bc_v8f32 (and VR256:$src1, (loadv4i64 addr:$src2))),
            (VANDPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(bc_v8f32 (or VR256:$src1, (loadv4i64 addr:$src2))),
            (VORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(bc_v8f32 (xor VR256:$src1, (loadv4i64 addr:$src2))),
            (VXORPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(bc_v8f32 (X86andnp VR256:$src1, (loadv4i64 addr:$src2))),
            (VANDNPSYrm VR256:$src1, addr:$src2)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//

/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///

/// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
/// classes below
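///
/// Illustratively, the intrinsic (vector) form of addss computes
///   dst = { src1[0] + src2[0], src1[1], src1[2], src1[3] }
/// Because elements 1-3 come from $src1 only, the operands cannot be
/// swapped even though the underlying fadd is commutable.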
multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, SizeItins itins> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
                               VR128, v4f32, f128mem, loadv4f32,
                               SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
                               VR128, v2f64, f128mem, loadv2f64,
                               SSEPackedDouble, itins.d, 0>, PD, VEX_4V;

  defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
                        OpNode, VR256, v8f32, f256mem, loadv8f32,
                        SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
  defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
                        OpNode, VR256, v4f64, f256mem, loadv4f64,
                        SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
                              v4f32, f128mem, memopv4f32, SSEPackedSingle,
                              itins.s>, PS;
    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
                              v2f64, f128mem, memopv2f64, SSEPackedDouble,
                              itins.d>, PD;
  }
}

multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  SizeItins itins> {
  defm V#NAME#SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                         OpNode, FR32, f32mem, itins.s, 0>, XS, VEX_4V, VEX_LIG;
  defm V#NAME#SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                         OpNode, FR64, f64mem, itins.d, 0>, XD, VEX_4V, VEX_LIG;

  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                              OpNode, FR32, f32mem, itins.s>, XS;
    defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                              OpNode, FR64, f64mem, itins.d>, XD;
  }
}

multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
                                      SizeItins itins> {
  defm V#NAME#SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
                   itins.s, 0>, XS, VEX_4V, VEX_LIG;
  defm V#NAME#SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
                   itins.d, 0>, XD, VEX_4V, VEX_LIG;

  let Constraints = "$src1 = $dst" in {
    defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32,
                   itins.s>, XS;
    defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
                   !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64,
                   itins.d>, XD;
  }
}
// Binary Arithmetic instructions
defm ADD : basic_sse12_fp_binop_p<0x58, "add", fadd, SSE_ALU_ITINS_P>,
           basic_sse12_fp_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>,
           basic_sse12_fp_binop_s_int<0x58, "add", SSE_ALU_ITINS_S>;
defm MUL : basic_sse12_fp_binop_p<0x59, "mul", fmul, SSE_MUL_ITINS_P>,
           basic_sse12_fp_binop_s<0x59, "mul", fmul, SSE_MUL_ITINS_S>,
           basic_sse12_fp_binop_s_int<0x59, "mul", SSE_MUL_ITINS_S>;
let isCommutable = 0 in {
  defm SUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S>;
  defm DIV : basic_sse12_fp_binop_p<0x5E, "div", fdiv, SSE_DIV_ITINS_P>,
             basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S>;
  defm MAX : basic_sse12_fp_binop_p<0x5F, "max", X86fmax, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5F, "max", SSE_ALU_ITINS_S>;
  defm MIN : basic_sse12_fp_binop_p<0x5D, "min", X86fmin, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>,
             basic_sse12_fp_binop_s_int<0x5D, "min", SSE_ALU_ITINS_S>;
}

let isCodeGenOnly = 1 in {
  defm MAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>;
  defm MINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>,
             basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>;
}
// Patterns used to select SSE scalar fp arithmetic instructions from
// a scalar fp operation followed by a blend.
//
// These patterns know, for example, how to select an ADDSS from a
// float add plus vector insert.
//
// The effect is that the backend no longer emits unnecessary vector
// insert instructions immediately after SSE scalar fp instructions
// like addss or mulss.
//
// For example, given the following code:
//   __m128 foo(__m128 A, __m128 B) {
//     A[0] += B[0];
//     return A;
//   }
//
// previously we generated:
//   addss %xmm0, %xmm1
//   movss %xmm1, %xmm0
//
// we now generate:
//   addss %xmm1, %xmm0
let Predicates = [UseSSE1] in {
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))))),
            (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))))),
            (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))))),
            (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))))),
            (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
}

let Predicates = [UseSSE2] in {
  // SSE2 patterns to select scalar double-precision fp arithmetic instructions
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
}
let Predicates = [UseSSE41] in {
  // If the subtarget has SSE4.1 but not AVX, the vector insert instruction is
  // lowered into an X86insertps or an X86Blendi rather than an X86Movss. When
  // selecting SSE scalar single-precision fp arithmetic instructions, make
  // sure that we correctly match them.

  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fadd (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fsub (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fmul (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fdiv (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;

  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (ADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (SUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (MULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (DIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;

  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;

  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fadd
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (ADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fsub
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (SUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fmul
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (MULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fdiv
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (DIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
}
let Predicates = [HasAVX] in {
  // The following patterns select AVX Scalar single/double precision fp
  // arithmetic instructions.

  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))))),
            (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fadd (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (VADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fsub (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (VSUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fmul (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (VMULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86insertps (v4f32 VR128:$dst), (v4f32 (scalar_to_vector
                    (fdiv (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                    FR32:$src))), (iPTR 0))),
            (VDIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;

  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fadd
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (VADDSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fsub
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (VSUBSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fmul
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (VMULSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst), (v4f32 (scalar_to_vector (fdiv
                      (f32 (vector_extract (v4f32 VR128:$dst), (iPTR 0))),
                      FR32:$src))), (i8 1))),
            (VDIVSSrr_Int v4f32:$dst, (COPY_TO_REGCLASS FR32:$src, VR128))>;

  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fadd
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fsub
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fmul
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst), (v2f64 (scalar_to_vector (fdiv
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (i8 1))),
            (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;

  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fadd
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (VADDSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fsub
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (VSUBSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fmul
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (VMULSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
  def : Pat<(v2f64 (X86Blendi (v2f64 (scalar_to_vector (fdiv
                      (f64 (vector_extract (v2f64 VR128:$dst), (iPTR 0))),
                      FR64:$src))), (v2f64 VR128:$dst), (i8 2))),
            (VDIVSDrr_Int v2f64:$dst, (COPY_TO_REGCLASS FR64:$src, VR128))>;
}
// Patterns used to select SSE scalar fp arithmetic instructions from
// a vector packed single/double fp operation followed by a vector insert.
//
// The effect is that the backend converts the packed fp instruction
// followed by a vector insert into a single SSE scalar fp instruction.
//
// For example, given the following code:
//   __m128 foo(__m128 A, __m128 B) {
//     __m128 C = A + B;
//     return (__m128) {C[0], A[1], A[2], A[3]};
//   }
//
// previously we generated:
//   addps %xmm0, %xmm1
//   movss %xmm1, %xmm0
//
// we now generate:
//   addss %xmm1, %xmm0
let Predicates = [UseSSE1] in {
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (ADDSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (SUBSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (MULSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (DIVSSrr_Int v4f32:$dst, v4f32:$src)>;
}

let Predicates = [UseSSE2] in {
  // SSE2 patterns to select scalar double-precision fp arithmetic instructions
  // from a packed double-precision fp instruction plus movsd.

  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;
}

let Predicates = [UseSSE41] in {
  // With SSE4.1 we may see these operations using X86Blendi rather than
  // X86Movs{s,d}.
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (ADDSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (SUBSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (MULSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (DIVSSrr_Int v4f32:$dst, v4f32:$src)>;

  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;

  def : Pat<(v2f64 (X86Blendi (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (ADDSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (SUBSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (MULSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (DIVSDrr_Int v2f64:$dst, v2f64:$src)>;
}
let Predicates = [HasAVX] in {
  // The following patterns select AVX scalar single/double precision fp
  // arithmetic instructions from a packed single/double precision fp
  // instruction plus movss/movsd.

  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (VADDSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (VSUBSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (VMULSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Movss (v4f32 VR128:$dst),
                   (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)))),
            (VDIVSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Movsd (v2f64 VR128:$dst),
                   (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)))),
            (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;

  // Also handle X86Blendi-based patterns.
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fadd (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (VADDSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fsub (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (VSUBSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fmul (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (VMULSSrr_Int v4f32:$dst, v4f32:$src)>;
  def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$dst),
                  (fdiv (v4f32 VR128:$dst), (v4f32 VR128:$src)), (i8 1))),
            (VDIVSSrr_Int v4f32:$dst, v4f32:$src)>;

  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$dst),
                  (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)), (i8 1))),
            (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;

  def : Pat<(v2f64 (X86Blendi (fadd (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (VADDSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (fsub (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (VSUBSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (fmul (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (VMULSDrr_Int v2f64:$dst, v2f64:$src)>;
  def : Pat<(v2f64 (X86Blendi (fdiv (v2f64 VR128:$dst), (v2f64 VR128:$src)),
                  (v2f64 VR128:$dst), (i8 2))),
            (VDIVSDrr_Int v2f64:$dst, v2f64:$src)>;
}
/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.

let Sched = WriteFSqrt in {
def SSE_SQRTPS : OpndItins<
  IIC_SSE_SQRTPS_RR, IIC_SSE_SQRTPS_RM
>;

def SSE_SQRTSS : OpndItins<
  IIC_SSE_SQRTSS_RR, IIC_SSE_SQRTSS_RM
>;

def SSE_SQRTPD : OpndItins<
  IIC_SSE_SQRTPD_RR, IIC_SSE_SQRTPD_RM
>;

def SSE_SQRTSD : OpndItins<
  IIC_SSE_SQRTSD_RR, IIC_SSE_SQRTSD_RM
>;
}

let Sched = WriteFRsqrt in {
def SSE_RSQRTPS : OpndItins<
  IIC_SSE_RSQRTPS_RR, IIC_SSE_RSQRTPS_RM
>;

def SSE_RSQRTSS : OpndItins<
  IIC_SSE_RSQRTSS_RR, IIC_SSE_RSQRTSS_RM
>;
}

let Sched = WriteFRcp in {
def SSE_RCPP : OpndItins<
  IIC_SSE_RCPP_RR, IIC_SSE_RCPP_RM
>;

def SSE_RCPS : OpndItins<
  IIC_SSE_RCPS_RR, IIC_SSE_RCPS_RM
>;
}
/// sse1_fp_unop_s - SSE1 unops in scalar form.
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F32Int, OpndItins itins> {
let Predicates = [HasAVX], hasSideEffects = 0 in {
  def V#NAME#SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst),
                       (ins FR32:$src1, FR32:$src2),
                       !strconcat("v", OpcodeStr,
                                  "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
  let mayLoad = 1 in {
  def V#NAME#SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
                       (ins FR32:$src1, f32mem:$src2),
                       !strconcat("v", OpcodeStr,
                                  "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       []>, VEX_4V, VEX_LIG,
                       Sched<[itins.Sched.Folded, ReadAfterLd]>;
  let isCodeGenOnly = 1 in
  def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, ssmem:$src2),
                           !strconcat("v", OpcodeStr,
                                      "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                           []>, VEX_4V, VEX_LIG,
                           Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]>, Sched<[itins.Sched]>;
  // For scalar unary operations, fold a load into the operation
  // only in OptForSize mode. It eliminates an instruction, but it also
  // eliminates a whole-register clobber (the load), so it introduces a
  // partial register update condition.
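  // Illustratively (a sketch, not exact output): with the load folded we emit
  //   sqrtss (%rax), %xmm0
  // which leaves xmm0[127:32] unmodified and therefore depends on the
  // previous contents of %xmm0, whereas the unfolded sequence
  //   movss  (%rax), %xmm0
  //   sqrtss %xmm0, %xmm0
  // writes the whole register and starts a fresh dependency chain.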
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
              !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
              [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
              Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
  let isCodeGenOnly = 1 in {
    def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (F32Int VR128:$src))], itins.rr>,
                      Sched<[itins.Sched]>;
    def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                      !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>,
                      Sched<[itins.Sched.Folded]>;
  }
}
/// sse1_fp_unop_rw - SSE1 unops where the vector form has a read-write operand.
multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           OpndItins itins> {
let Predicates = [HasAVX], hasSideEffects = 0 in {
  def V#NAME#SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst),
                       (ins FR32:$src1, FR32:$src2),
                       !strconcat("v", OpcodeStr,
                                  "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
  let mayLoad = 1 in {
  def V#NAME#SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
                       (ins FR32:$src1, f32mem:$src2),
                       !strconcat("v", OpcodeStr,
                                  "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       []>, VEX_4V, VEX_LIG,
                       Sched<[itins.Sched.Folded, ReadAfterLd]>;
  let isCodeGenOnly = 1 in
  def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, ssmem:$src2),
                           !strconcat("v", OpcodeStr,
                                      "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                           []>, VEX_4V, VEX_LIG,
                           Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]>, Sched<[itins.Sched]>;
  // For scalar unary operations, fold a load into the operation
  // only in OptForSize mode. It eliminates an instruction, but it also
  // eliminates a whole-register clobber (the load), so it introduces a
  // partial register update condition.
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
              !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
              [(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
              Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
  let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
    def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                      [], itins.rr>, Sched<[itins.Sched]>;
    let mayLoad = 1, hasSideEffects = 0 in
    def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, ssmem:$src2),
                      !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                      [], itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}
/// sse1_fp_unop_p - SSE1 unops in packed form.
multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          OpndItins itins> {
let Predicates = [HasAVX] in {
  def V#NAME#PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       !strconcat("v", OpcodeStr,
                                  "ps\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))],
                       itins.rr>, VEX, Sched<[itins.Sched]>;
  def V#NAME#PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       !strconcat("v", OpcodeStr,
                                  "ps\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (OpNode (loadv4f32 addr:$src)))],
                       itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
  def V#NAME#PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        !strconcat("v", OpcodeStr,
                                   "ps\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))],
                        itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
  def V#NAME#PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        !strconcat("v", OpcodeStr,
                                   "ps\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (OpNode (loadv8f32 addr:$src)))],
                        itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}

  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))], itins.rr>,
                Sched<[itins.Sched]>;
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))], itins.rm>,
                Sched<[itins.Sched.Folded]>;
}
/// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V4F32Int, Intrinsic V8F32Int,
                              OpndItins itins> {
let isCodeGenOnly = 1 in {
let Predicates = [HasAVX] in {
  def V#NAME#PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                           !strconcat("v", OpcodeStr,
                                      "ps\t{$src, $dst|$dst, $src}"),
                           [(set VR128:$dst, (V4F32Int VR128:$src))],
                           itins.rr>, VEX, Sched<[itins.Sched]>;
  def V#NAME#PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                           !strconcat("v", OpcodeStr,
                                      "ps\t{$src, $dst|$dst, $src}"),
                           [(set VR128:$dst, (V4F32Int (loadv4f32 addr:$src)))],
                           itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
  def V#NAME#PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                            !strconcat("v", OpcodeStr,
                                       "ps\t{$src, $dst|$dst, $src}"),
                            [(set VR256:$dst, (V8F32Int VR256:$src))],
                            itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
  def V#NAME#PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst),
                            (ins f256mem:$src),
                            !strconcat("v", OpcodeStr,
                                       "ps\t{$src, $dst|$dst, $src}"),
                            [(set VR256:$dst, (V8F32Int (loadv8f32 addr:$src)))],
                            itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}

  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))],
                    itins.rr>, Sched<[itins.Sched]>;
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
                    itins.rm>, Sched<[itins.Sched.Folded]>;
} // isCodeGenOnly = 1
}
/// sse2_fp_unop_s - SSE2 unops in scalar form.
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F64Int, OpndItins itins> {
let Predicates = [HasAVX], hasSideEffects = 0 in {
  def V#NAME#SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst),
                       (ins FR64:$src1, FR64:$src2),
                       !strconcat("v", OpcodeStr,
                                  "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       []>, VEX_4V, VEX_LIG, Sched<[itins.Sched]>;
  let mayLoad = 1 in {
  def V#NAME#SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
                       (ins FR64:$src1, f64mem:$src2),
                       !strconcat("v", OpcodeStr,
                                  "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       []>, VEX_4V, VEX_LIG,
                       Sched<[itins.Sched.Folded, ReadAfterLd]>;
  let isCodeGenOnly = 1 in
  def V#NAME#SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, sdmem:$src2),
                           !strconcat("v", OpcodeStr,
                                      "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                           []>, VEX_4V, VEX_LIG,
                           Sched<[itins.Sched.Folded, ReadAfterLd]>;
  }
}

  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))], itins.rr>,
                Sched<[itins.Sched]>;
  // See the comments in sse1_fp_unop_s for why this is OptForSize.
  def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
              [(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
              Requires<[UseSSE2, OptForSize]>, Sched<[itins.Sched.Folded]>;
  let isCodeGenOnly = 1 in {
    def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (F64Int VR128:$src))], itins.rr>,
                      Sched<[itins.Sched]>;
    def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                      !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                      [(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>,
                      Sched<[itins.Sched.Folded]>;
  }
}
/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, OpndItins itins> {
let Predicates = [HasAVX] in {
  def V#NAME#PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       !strconcat("v", OpcodeStr,
                                  "pd\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))],
                       itins.rr>, VEX, Sched<[itins.Sched]>;
  def V#NAME#PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       !strconcat("v", OpcodeStr,
                                  "pd\t{$src, $dst|$dst, $src}"),
                       [(set VR128:$dst, (OpNode (loadv2f64 addr:$src)))],
                       itins.rm>, VEX, Sched<[itins.Sched.Folded]>;
  def V#NAME#PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        !strconcat("v", OpcodeStr,
                                   "pd\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))],
                        itins.rr>, VEX, VEX_L, Sched<[itins.Sched]>;
  def V#NAME#PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        !strconcat("v", OpcodeStr,
                                   "pd\t{$src, $dst|$dst, $src}"),
                        [(set VR256:$dst, (OpNode (loadv4f64 addr:$src)))],
                        itins.rm>, VEX, VEX_L, Sched<[itins.Sched.Folded]>;
}

  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))], itins.rr>,
                Sched<[itins.Sched]>;
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))], itins.rm>,
                Sched<[itins.Sched.Folded]>;
}
// Square root.
defm SQRT  : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss,
                            SSE_SQRTSS>,
             sse1_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPS>,
             sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd,
                            SSE_SQRTSD>,
             sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTPD>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
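// The usual refinement is one Newton-Raphson step; e.g. (as a sketch) for
// rcpps approximating 1/a with hardware estimate x0:
//   x1 = x0 * (2.0 - a * x0)
// and for rsqrtps approximating 1/sqrt(a):
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
// which roughly doubles the ~12 bits of accuracy the estimate provides.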
defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, SSE_RSQRTSS>,
             sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_RSQRTPS>,
             sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps,
                                int_x86_avx_rsqrt_ps_256, SSE_RSQRTPS>;
defm RCP   : sse1_fp_unop_rw<0x53, "rcp", X86frcp, SSE_RCPS>,
             sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPP>,
             sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps,
                                int_x86_avx_rcp_ps_256, SSE_RCPP>;

let Predicates = [UseAVX] in {
  def : Pat<(f32 (fsqrt FR32:$src)),
            (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
  def : Pat<(f32 (fsqrt (load addr:$src))),
            (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[HasAVX, OptForSize]>;
  def : Pat<(f64 (fsqrt FR64:$src)),
            (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
  def : Pat<(f64 (fsqrt (load addr:$src))),
            (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[HasAVX, OptForSize]>;

  def : Pat<(f32 (X86frsqrt FR32:$src)),
            (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
  def : Pat<(f32 (X86frsqrt (load addr:$src))),
            (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[HasAVX, OptForSize]>;

  def : Pat<(f32 (X86frcp FR32:$src)),
            (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
  def : Pat<(f32 (X86frcp (load addr:$src))),
            (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
            Requires<[HasAVX, OptForSize]>;
}
let Predicates = [UseAVX] in {
  def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
            (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)),
                               (COPY_TO_REGCLASS VR128:$src, FR32)),
                              VR128)>;
  def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
            (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
            (COPY_TO_REGCLASS (VSQRTSDr (f64 (IMPLICIT_DEF)),
                               (COPY_TO_REGCLASS VR128:$src, FR64)),
                              VR128)>;
  def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
            (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
            (COPY_TO_REGCLASS (VRSQRTSSr (f32 (IMPLICIT_DEF)),
                               (COPY_TO_REGCLASS VR128:$src, FR32)),
                              VR128)>;
  def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
            (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse_rcp_ss VR128:$src),
            (COPY_TO_REGCLASS (VRCPSSr (f32 (IMPLICIT_DEF)),
                               (COPY_TO_REGCLASS VR128:$src, FR32)),
                              VR128)>;
  def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
            (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
}

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
let Predicates = [UseSSE1] in {
  def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
            (RSQRTSSr_Int VR128:$src, VR128:$src)>;
  def : Pat<(int_x86_sse_rcp_ss VR128:$src),
            (RCPSSr_Int VR128:$src, VR128:$src)>;
}

// There is no f64 version of the reciprocal approximation instructions.
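// (rcpss/rsqrtss only exist for single precision; double-precision code has
// to use the full-precision divsd/sqrtsd instead.)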

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Non-temporal stores
//===----------------------------------------------------------------------===//
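
// These correspond to intrinsics such as _mm_stream_ps; e.g. (illustrative):
//   void store_nt(float *p, __m128 v) { _mm_stream_ps(p, v); }  // movntps
// Non-temporal stores are weakly ordered and bypass the caches, so the
// address must be suitably aligned (16 bytes here, 32 for the Y forms) and
// an sfence is needed to order them with respect to other stores.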
3883 let AddedComplexity = 400 in { // Prefer non-temporal versions
3884 let SchedRW = [WriteStore] in {
3885 let Predicates = [HasAVX, NoVLX] in {
3886 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
3887 (ins f128mem:$dst, VR128:$src),
3888 "movntps\t{$src, $dst|$dst, $src}",
3889 [(alignednontemporalstore (v4f32 VR128:$src),
3890 addr:$dst)],
3891 IIC_SSE_MOVNT>, VEX;
3892 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
3893 (ins f128mem:$dst, VR128:$src),
3894 "movntpd\t{$src, $dst|$dst, $src}",
3895 [(alignednontemporalstore (v2f64 VR128:$src),
3896 addr:$dst)],
3897 IIC_SSE_MOVNT>, VEX;
3899 let ExeDomain = SSEPackedInt in
3900 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
3901 (ins f128mem:$dst, VR128:$src),
3902 "movntdq\t{$src, $dst|$dst, $src}",
3903 [(alignednontemporalstore (v2i64 VR128:$src),
3904 addr:$dst)],
3905 IIC_SSE_MOVNT>, VEX;
3907 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
3908 (ins f256mem:$dst, VR256:$src),
3909 "movntps\t{$src, $dst|$dst, $src}",
3910 [(alignednontemporalstore (v8f32 VR256:$src),
3911 addr:$dst)],
3912 IIC_SSE_MOVNT>, VEX, VEX_L;
3913 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
3914 (ins f256mem:$dst, VR256:$src),
3915 "movntpd\t{$src, $dst|$dst, $src}",
3916 [(alignednontemporalstore (v4f64 VR256:$src),
3917 addr:$dst)],
3918 IIC_SSE_MOVNT>, VEX, VEX_L;
3919 let ExeDomain = SSEPackedInt in
3920 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
3921 (ins f256mem:$dst, VR256:$src),
3922 "movntdq\t{$src, $dst|$dst, $src}",
3923 [(alignednontemporalstore (v4i64 VR256:$src),
3924 addr:$dst)],
3925 IIC_SSE_MOVNT>, VEX, VEX_L;
3926 }
3928 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3929 "movntps\t{$src, $dst|$dst, $src}",
3930 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)],
3931 IIC_SSE_MOVNT>;
3932 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3933 "movntpd\t{$src, $dst|$dst, $src}",
3934 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)],
3935 IIC_SSE_MOVNT>;
3937 let ExeDomain = SSEPackedInt in
3938 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
3939 "movntdq\t{$src, $dst|$dst, $src}",
3940 [(alignednontemporalstore (v2i64 VR128:$src), addr:$dst)],
3941 IIC_SSE_MOVNT>;
3943 // There is no AVX form for instructions below this point
3944 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
3945 "movnti{l}\t{$src, $dst|$dst, $src}",
3946 [(nontemporalstore (i32 GR32:$src), addr:$dst)],
3947 IIC_SSE_MOVNT>,
3948 PS, Requires<[HasSSE2]>;
3949 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
3950 "movnti{q}\t{$src, $dst|$dst, $src}",
3951 [(nontemporalstore (i64 GR64:$src), addr:$dst)],
3952 IIC_SSE_MOVNT>,
3953 PS, Requires<[HasSSE2]>;
3954 } // SchedRW = [WriteStore]
3956 let Predicates = [HasAVX, NoVLX] in {
3957 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3958 (VMOVNTPSmr addr:$dst, VR128:$src)>;
3959 }
3961 def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
3962 (MOVNTPSmr addr:$dst, VR128:$src)>;
3964 } // AddedComplexity
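// Usage sketch: the streaming-store intrinsics (_mm_stream_ps,
// _mm_stream_si128, _mm_stream_si32/_mm_stream_si64) produce stores carrying
// the nontemporal hint, so they select MOVNTPS / MOVNTDQ / MOVNTI here. The
// AddedComplexity of 400 is what lets these patterns beat the ordinary
// aligned-store patterns whenever that hint is present.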
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Prefetch and memory fence
//===----------------------------------------------------------------------===//

// Prefetch intrinsic.
let Predicates = [HasSSE1], SchedRW = [WriteLoad] in {
def PREFETCHT0  : I<0x18, MRM1m, (outs), (ins i8mem:$src),
    "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHT1  : I<0x18, MRM2m, (outs), (ins i8mem:$src),
    "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHT2  : I<0x18, MRM3m, (outs), (ins i8mem:$src),
    "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
def PREFETCHNTA : I<0x18, MRM0m, (outs), (ins i8mem:$src),
    "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))],
    IIC_SSE_PREFETCH>, TB;
}
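// The third prefetch operand above is the locality hint: 3 selects T0 (all
// cache levels), 2 selects T1, 1 selects T2, and 0 selects NTA; the second
// operand is the read/write hint, matched here as any immediate. As a usage
// sketch, __builtin_prefetch(p, 0 /*read*/, 3 /*max locality*/) lowers to a
// prefetch node with locality 3 and therefore selects PREFETCHT0.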
// FIXME: How should the flush instruction be modeled?
let SchedRW = [WriteLoad] in {
// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)],
                IIC_SSE_PREFETCH>, TB, Requires<[HasSSE2]>;
}

let SchedRW = [WriteNop] in {
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins),
              "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>,
              OBXS, Requires<[HasSSE2]>;
}

let SchedRW = [WriteFence] in {
// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
               "sfence", [(int_x86_sse_sfence)], IIC_SSE_SFENCE>,
               TB, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)], IIC_SSE_LFENCE>,
               TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)], IIC_SSE_MFENCE>,
               TB, Requires<[HasSSE2]>;
} // SchedRW

def : Pat<(X86SFence), (SFENCE)>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;
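// Note: beyond the explicit _mm_sfence/_mm_lfence/_mm_mfence intrinsics, the
// X86MFence node is typically what a seq_cst fence (e.g.
// std::atomic_thread_fence(std::memory_order_seq_cst)) is lowered to on x86,
// since ordinary loads and stores already provide acquire/release ordering.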
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Load/Store MXCSR register
//===----------------------------------------------------------------------===//
def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                    "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
                    IIC_SSE_LDMXCSR>, VEX, Sched<[WriteLoad]>;
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                    "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
                    IIC_SSE_STMXCSR>, VEX, Sched<[WriteStore]>;

def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)],
                  IIC_SSE_LDMXCSR>, Sched<[WriteLoad]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)],
                  IIC_SSE_STMXCSR>, Sched<[WriteStore]>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

let hasSideEffects = 0, SchedRW = [WriteMove] in {
def VMOVDQArr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVA_P_RR>, VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVDQUrr  : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVU_P_RR>, VEX;
def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

// For Disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteMove] in {
def VMOVDQArr_REV  : VPDI<0x7F, MRMDestReg, (outs VR128:$dst),
                          (ins VR128:$src),
                          "movdqa\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVA_P_RR>, VEX;
def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst),
                          (ins VR256:$src),
                          "movdqa\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVA_P_RR>, VEX, VEX_L;
def VMOVDQUrr_REV  : VSSI<0x7F, MRMDestReg, (outs VR128:$dst),
                          (ins VR128:$src),
                          "movdqu\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVU_P_RR>, VEX;
def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst),
                          (ins VR256:$src),
                          "movdqu\t{$src, $dst|$dst, $src}", [],
                          IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
}

let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
    hasSideEffects = 0, SchedRW = [WriteLoad] in {
def VMOVDQArm  : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVA_P_RM>, VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVA_P_RM>, VEX, VEX_L;
let Predicates = [HasAVX] in {
  def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVU_P_RM>, XS, VEX;
  def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", [],
                     IIC_SSE_MOVU_P_RM>, XS, VEX, VEX_L;
}
}

let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def VMOVDQAmr  : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i128mem:$dst, VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVA_P_MR>, VEX;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i256mem:$dst, VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", [],
                      IIC_SSE_MOVA_P_MR>, VEX, VEX_L;
let Predicates = [HasAVX] in {
def VMOVDQUmr  : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "vmovdqu\t{$src, $dst|$dst, $src}", [],
                   IIC_SSE_MOVU_P_MR>, XS, VEX;
def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
                   "vmovdqu\t{$src, $dst|$dst, $src}", [],
                   IIC_SSE_MOVU_P_MR>, XS, VEX, VEX_L;
}
}

let SchedRW = [WriteMove] in {
let hasSideEffects = 0 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>;

def MOVDQUrr :   I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;

// For Disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                       "movdqa\t{$src, $dst|$dst, $src}", [],
                       IIC_SSE_MOVA_P_RR>;

def MOVDQUrr_REV :   I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                       "movdqu\t{$src, $dst|$dst, $src}",
                       [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>;
}
} // SchedRW

let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
    hasSideEffects = 0, SchedRW = [WriteLoad] in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/],
                   IIC_SSE_MOVA_P_RM>;
def MOVDQUrm :   I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (loadv2i64 addr:$src))*/],
                   IIC_SSE_MOVU_P_RM>,
                   XS, Requires<[UseSSE2]>;
}

let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/],
                   IIC_SSE_MOVA_P_MR>;
def MOVDQUmr :   I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(store (v2i64 VR128:$src), addr:$dst)*/],
                   IIC_SSE_MOVU_P_MR>,
                   XS, Requires<[UseSSE2]>;
}

} // ExeDomain = SSEPackedInt

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
            (VMOVDQUmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
            (VMOVDQUYmr addr:$dst, VR256:$src)>;
}
let Predicates = [UseSSE2] in
def : Pat<(int_x86_sse2_storeu_dq addr:$dst, VR128:$src),
          (MOVDQUmr addr:$dst, VR128:$src)>;
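// Mapping sketch (assumed, not verified here): these unaligned-store
// intrinsics are what _mm_storeu_si128 / _mm256_storeu_si256 reach through
// their __builtin_ia32_storedqu builtins, so they always select (V)MOVDQUmr,
// even when the pointer happens to be 16-byte aligned at run time.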
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

let Sched = WriteVecIMul in
def SSE_PMADD : OpndItins<
  IIC_SSE_PMADD, IIC_SSE_PMADD
>;

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            RegisterClass RC, PatFrag memop_frag,
                            X86MemOperand x86memop,
                            OpndItins itins,
                            bit IsCommutable = 0,
                            bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (IntId RC:$src1, RC:$src2))], itins.rr>,
      Sched<[itins.Sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (IntId RC:$src1, (bitconvert (memop_frag addr:$src2))))],
       itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

multiclass PDI_binop_all_int<bits<8> opc, string OpcodeStr, Intrinsic IntId128,
                             Intrinsic IntId256, OpndItins itins,
                             bit IsCommutable = 0> {
let Predicates = [HasAVX] in
  defm V#NAME : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId128,
                                 VR128, loadv2i64, i128mem, itins,
                                 IsCommutable, 0>, VEX_4V;

let Constraints = "$src1 = $dst" in
  defm NAME : PDI_binop_rm_int<opc, OpcodeStr, IntId128, VR128, memopv2i64,
                               i128mem, itins, IsCommutable, 1>;

let Predicates = [HasAVX2] in
  defm V#NAME#Y : PDI_binop_rm_int<opc, !strconcat("v", OpcodeStr), IntId256,
                                   VR256, loadv4i64, i256mem, itins,
                                   IsCommutable, 0>, VEX_4V, VEX_L;
}
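// Expansion sketch: one defm through PDI_binop_all_int fans out into six
// records via PDI_binop_rm_int's "rr"/"rm" suffixes. For example,
//   defm PADDSB : PDI_binop_all_int<0xEC, "paddsb", ...>;
// produces PADDSBrr/PADDSBrm (SSE2, two-address), VPADDSBrr/VPADDSBrm
// (AVX, three-operand VEX_4V), and VPADDSBYrr/VPADDSBYrm (AVX2, 256-bit).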
multiclass PDI_binop_rmi<bits<8> opc, bits<8> opc2, Format ImmForm,
                         string OpcodeStr, SDNode OpNode,
                         SDNode OpNode2, RegisterClass RC,
                         ValueType DstVT, ValueType SrcVT, PatFrag bc_frag,
                         ShiftOpndItins itins,
                         bit Is2Addr = 1> {
  // src2 is always 128-bit
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode RC:$src1, (SrcVT VR128:$src2))))],
       itins.rr>, Sched<[WriteVecShift]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode RC:$src1,
                              (bc_frag (memopv2i64 addr:$src2)))))], itins.rm>,
       Sched<[WriteVecShiftLd, ReadAfterLd]>;
  def ri : PDIi8<opc2, ImmForm, (outs RC:$dst),
       (ins RC:$src1, i8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode2 RC:$src1, (i8 imm:$src2))))], itins.ri>,
       Sched<[WriteVecShift]>;
}

/// PDI_binop_rm2 - Simple SSE2 binary operator with different src and dst
/// types
multiclass PDI_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
                         ValueType DstVT, ValueType SrcVT, RegisterClass RC,
                         PatFrag memop_frag, X86MemOperand x86memop,
                         OpndItins itins,
                         bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
       Sched<[itins.Sched]>;
  def rm : PDI<opc, MRMSrcMem, (outs RC:$dst),
       (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
                              (bitconvert (memop_frag addr:$src2)))))]>,
       Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
} // ExeDomain = SSEPackedInt

defm PADDB   : PDI_binop_all<0xFC, "paddb", add, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1>;
defm PADDW   : PDI_binop_all<0xFD, "paddw", add, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1>;
defm PADDD   : PDI_binop_all<0xFE, "paddd", add, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 1>;
defm PADDQ   : PDI_binop_all<0xD4, "paddq", add, v2i64, v4i64,
                             SSE_INTALUQ_ITINS_P, 1>;
defm PMULLW  : PDI_binop_all<0xD5, "pmullw", mul, v8i16, v16i16,
                             SSE_INTMUL_ITINS_P, 1>;
defm PMULHUW : PDI_binop_all<0xE4, "pmulhuw", mulhu, v8i16, v16i16,
                             SSE_INTMUL_ITINS_P, 1>;
defm PMULHW  : PDI_binop_all<0xE5, "pmulhw", mulhs, v8i16, v16i16,
                             SSE_INTMUL_ITINS_P, 1>;
defm PSUBB   : PDI_binop_all<0xF8, "psubb", sub, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 0>;
defm PSUBW   : PDI_binop_all<0xF9, "psubw", sub, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 0>;
defm PSUBD   : PDI_binop_all<0xFA, "psubd", sub, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 0>;
defm PSUBQ   : PDI_binop_all<0xFB, "psubq", sub, v2i64, v4i64,
                             SSE_INTALUQ_ITINS_P, 0>;
defm PSUBUSB : PDI_binop_all<0xD8, "psubusb", X86subus, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 0>;
defm PSUBUSW : PDI_binop_all<0xD9, "psubusw", X86subus, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 0>;
defm PMINUB  : PDI_binop_all<0xDA, "pminub", X86umin, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1>;
defm PMINSW  : PDI_binop_all<0xEA, "pminsw", X86smin, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1>;
defm PMAXUB  : PDI_binop_all<0xDE, "pmaxub", X86umax, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1>;
defm PMAXSW  : PDI_binop_all<0xEE, "pmaxsw", X86smax, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1>;

// Intrinsic forms
defm PSUBSB  : PDI_binop_all_int<0xE8, "psubsb", int_x86_sse2_psubs_b,
                                 int_x86_avx2_psubs_b, SSE_INTALU_ITINS_P, 0>;
defm PSUBSW  : PDI_binop_all_int<0xE9, "psubsw", int_x86_sse2_psubs_w,
                                 int_x86_avx2_psubs_w, SSE_INTALU_ITINS_P, 0>;
defm PADDSB  : PDI_binop_all_int<0xEC, "paddsb", int_x86_sse2_padds_b,
                                 int_x86_avx2_padds_b, SSE_INTALU_ITINS_P, 1>;
defm PADDSW  : PDI_binop_all_int<0xED, "paddsw", int_x86_sse2_padds_w,
                                 int_x86_avx2_padds_w, SSE_INTALU_ITINS_P, 1>;
defm PADDUSB : PDI_binop_all_int<0xDC, "paddusb", int_x86_sse2_paddus_b,
                                 int_x86_avx2_paddus_b, SSE_INTALU_ITINS_P, 1>;
defm PADDUSW : PDI_binop_all_int<0xDD, "paddusw", int_x86_sse2_paddus_w,
                                 int_x86_avx2_paddus_w, SSE_INTALU_ITINS_P, 1>;
defm PMADDWD : PDI_binop_all_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd,
                                 int_x86_avx2_pmadd_wd, SSE_PMADD, 1>;
defm PAVGB   : PDI_binop_all_int<0xE0, "pavgb", int_x86_sse2_pavg_b,
                                 int_x86_avx2_pavg_b, SSE_INTALU_ITINS_P, 1>;
defm PAVGW   : PDI_binop_all_int<0xE3, "pavgw", int_x86_sse2_pavg_w,
                                 int_x86_avx2_pavg_w, SSE_INTALU_ITINS_P, 1>;
defm PSADBW  : PDI_binop_all_int<0xF6, "psadbw", int_x86_sse2_psad_bw,
                                 int_x86_avx2_psad_bw, SSE_PMADD, 1>;

let Predicates = [HasAVX] in
defm VPMULUDQ : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v2i64, v4i32,
                              VR128, loadv2i64, i128mem,
                              SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
let Predicates = [HasAVX2] in
defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32,
                               VR256, loadv4i64, i256mem,
                               SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
let Constraints = "$src1 = $dst" in
defm PMULUDQ : PDI_binop_rm2<0xF4, "pmuludq", X86pmuludq, v2i64, v4i32, VR128,
                             memopv2i64, i128mem, SSE_INTMUL_ITINS_P, 1>;
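// Semantics sketch: pmuludq is why PDI_binop_rm2 allows DstVT != SrcVT. It
// multiplies the low unsigned 32 bits of each 64-bit lane into a full 64-bit
// product; on v4i32 inputs {a0,a1,a2,a3} x {b0,b1,b2,b3} it yields the v2i64
// {zext(a0)*zext(b0), zext(a2)*zext(b2)} - the _mm_mul_epu32 operation.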
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
                            VR128, v8i16, v8i16, bc_v8i16,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                            VR128, v4i32, v4i32, bc_v4i32,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
                            VR128, v2i64, v2i64, bc_v2i64,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;

defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
                            VR128, v8i16, v8i16, bc_v8i16,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                            VR128, v4i32, v4i32, bc_v4i32,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
                            VR128, v2i64, v2i64, bc_v2i64,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;

defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
                            VR128, v8i16, v8i16, bc_v8i16,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                            VR128, v4i32, v4i32, bc_v4i32,
                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
  // 128-bit logical shifts.
  def VPSLLDQri : PDIi8<0x73, MRM7r,
                        (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                        "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))]>,
                        VEX_4V;
  def VPSRLDQri : PDIi8<0x73, MRM3r,
                        (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                        "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))]>,
                        VEX_4V;
  // PSRADQri doesn't exist in SSE[1-3].
}
} // Predicates = [HasAVX]

let Predicates = [HasAVX2] in {
defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
                             VR256, v16i16, v8i16, bc_v8i16,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                             VR256, v8i32, v4i32, bc_v4i32,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli,
                             VR256, v4i64, v2i64, bc_v2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;

defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
                             VR256, v16i16, v8i16, bc_v8i16,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                             VR256, v8i32, v4i32, bc_v4i32,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli,
                             VR256, v4i64, v2i64, bc_v2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;

defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
                             VR256, v16i16, v8i16, bc_v8i16,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                             VR256, v8i32, v4i32, bc_v4i32,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
  // 256-bit logical shifts.
  def VPSLLDQYri : PDIi8<0x73, MRM7r,
                         (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
                         "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR256:$dst,
                           (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>,
                         VEX_4V, VEX_L;
  def VPSRLDQYri : PDIi8<0x73, MRM3r,
                         (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2),
                         "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                         [(set VR256:$dst,
                           (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>,
                         VEX_4V, VEX_L;
  // PSRADQYri doesn't exist in SSE[1-3].
}
} // Predicates = [HasAVX2]

let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "psllw", X86vshl, X86vshli,
                           VR128, v8i16, v8i16, bc_v8i16,
                           SSE_INTSHIFT_ITINS_P>;
defm PSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "pslld", X86vshl, X86vshli,
                           VR128, v4i32, v4i32, bc_v4i32,
                           SSE_INTSHIFT_ITINS_P>;
defm PSLLQ : PDI_binop_rmi<0xF3, 0x73, MRM6r, "psllq", X86vshl, X86vshli,
                           VR128, v2i64, v2i64, bc_v2i64,
                           SSE_INTSHIFT_ITINS_P>;

defm PSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "psrlw", X86vsrl, X86vsrli,
                           VR128, v8i16, v8i16, bc_v8i16,
                           SSE_INTSHIFT_ITINS_P>;
defm PSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "psrld", X86vsrl, X86vsrli,
                           VR128, v4i32, v4i32, bc_v4i32,
                           SSE_INTSHIFT_ITINS_P>;
defm PSRLQ : PDI_binop_rmi<0xD3, 0x73, MRM2r, "psrlq", X86vsrl, X86vsrli,
                           VR128, v2i64, v2i64, bc_v2i64,
                           SSE_INTSHIFT_ITINS_P>;

defm PSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "psraw", X86vsra, X86vsrai,
                           VR128, v8i16, v8i16, bc_v8i16,
                           SSE_INTSHIFT_ITINS_P>;
defm PSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "psrad", X86vsra, X86vsrai,
                           VR128, v4i32, v4i32, bc_v4i32,
                           SSE_INTSHIFT_ITINS_P>;

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] in {
  // 128-bit logical shifts.
  def PSLLDQri : PDIi8<0x73, MRM7r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "pslldq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2))],
                       IIC_SSE_INTSHDQ_P_RI>;
  def PSRLDQri : PDIi8<0x73, MRM3r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "psrldq\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2))],
                       IIC_SSE_INTSHDQ_P_RI>;
  // PSRADQri doesn't exist in SSE[1-3].
}
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
            (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
  def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
            (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
}

let Predicates = [HasAVX2] in {
  def : Pat<(int_x86_avx2_psll_dq VR256:$src1, imm:$src2),
            (VPSLLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
  def : Pat<(int_x86_avx2_psrl_dq VR256:$src1, imm:$src2),
            (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;
  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
            (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;
  def : Pat<(v2i64 (X86vshrdq VR128:$src, (i8 imm:$amt))),
            (PSRLDQri VR128:$src, (BYTE_imm imm:$amt))>;
}
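// Note on BYTE_imm (inferred from its use here): the psll_dq/psrl_dq
// intrinsics express the shift amount in bits, while the PSLLDQ/PSRLDQ
// encodings count whole bytes, so the transform divides the immediate by
// eight - e.g. a 64-bit intrinsic shift becomes "pslldq $8".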
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Comparison Instructions
//===---------------------------------------------------------------------===//

defm PCMPEQB : PDI_binop_all<0x74, "pcmpeqb", X86pcmpeq, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 1>;
defm PCMPEQW : PDI_binop_all<0x75, "pcmpeqw", X86pcmpeq, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 1>;
defm PCMPEQD : PDI_binop_all<0x76, "pcmpeqd", X86pcmpeq, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 1>;
defm PCMPGTB : PDI_binop_all<0x64, "pcmpgtb", X86pcmpgt, v16i8, v32i8,
                             SSE_INTALU_ITINS_P, 0>;
defm PCMPGTW : PDI_binop_all<0x65, "pcmpgtw", X86pcmpgt, v8i16, v16i16,
                             SSE_INTALU_ITINS_P, 0>;
defm PCMPGTD : PDI_binop_all<0x66, "pcmpgtd", X86pcmpgt, v4i32, v8i32,
                             SSE_INTALU_ITINS_P, 0>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt128, ValueType vt256,
                         SDNode OpNode> {
let Predicates = [HasAVX] in {
  def V#NAME#ri : Ii8<0x70, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, i8imm:$src2),
                      !strconcat("v", OpcodeStr,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128:$dst,
                        (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
                      IIC_SSE_PSHUF_RI>, VEX, Sched<[WriteShuffle]>;
  def V#NAME#mi : Ii8<0x70, MRMSrcMem, (outs VR128:$dst),
                      (ins i128mem:$src1, i8imm:$src2),
                      !strconcat("v", OpcodeStr,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                      [(set VR128:$dst,
                        (vt128 (OpNode (bitconvert (loadv2i64 addr:$src1)),
                                       (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
                      VEX, Sched<[WriteShuffleLd]>;
}

let Predicates = [HasAVX2] in {
  def V#NAME#Yri : Ii8<0x70, MRMSrcReg, (outs VR256:$dst),
                       (ins VR256:$src1, i8imm:$src2),
                       !strconcat("v", OpcodeStr,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (vt256 (OpNode VR256:$src1, (i8 imm:$src2))))],
                       IIC_SSE_PSHUF_RI>, VEX, VEX_L, Sched<[WriteShuffle]>;
  def V#NAME#Ymi : Ii8<0x70, MRMSrcMem, (outs VR256:$dst),
                       (ins i256mem:$src1, i8imm:$src2),
                       !strconcat("v", OpcodeStr,
                                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                       [(set VR256:$dst,
                         (vt256 (OpNode (bitconvert (loadv4i64 addr:$src1)),
                                        (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
                       VEX, VEX_L, Sched<[WriteShuffleLd]>;
}

let Predicates = [UseSSE2] in {
  def ri : Ii8<0x70, MRMSrcReg,
               (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
               !strconcat(OpcodeStr,
                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set VR128:$dst,
                 (vt128 (OpNode VR128:$src1, (i8 imm:$src2))))],
               IIC_SSE_PSHUF_RI>, Sched<[WriteShuffle]>;
  def mi : Ii8<0x70, MRMSrcMem,
               (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
               !strconcat(OpcodeStr,
                          "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
               [(set VR128:$dst,
                 (vt128 (OpNode (bitconvert (memopv2i64 addr:$src1)),
                                (i8 imm:$src2))))], IIC_SSE_PSHUF_MI>,
               Sched<[WriteShuffleLd, ReadAfterLd]>;
}
}
} // ExeDomain = SSEPackedInt

defm PSHUFD  : sse2_pshuffle<"pshufd", v4i32, v8i32, X86PShufd>, PD;
defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, v16i16, X86PShufhw>, XS;
defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, v16i16, X86PShuflw>, XD;

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (X86PShufd (loadv4f32 addr:$src1), (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>;
}

let Predicates = [UseSSE2] in {
  def : Pat<(v4f32 (X86PShufd (memopv4f32 addr:$src1), (i8 imm:$imm))),
            (PSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (PSHUFDri VR128:$src1, imm:$imm)>;
}
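// Immediate encoding sketch: each 2-bit field of the pshufd immediate picks
// the source element for one destination lane, low bits first. For example
//   pshufd $0x1B, %xmm0, %xmm1   ; 0x1B = 0b00_01_10_11
// selects elements 3,2,1,0 and therefore reverses a v4i32. pshufhw/pshuflw
// apply the same scheme to only the high or low four i16 lanes.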
//===---------------------------------------------------------------------===//
// Packed Integer Pack Instructions (SSE & AVX)
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
                     ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
                     bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
               (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set VR128:$dst,
                     (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
               Sched<[WriteShuffle]>;
  def rm : PDI<opc, MRMSrcMem,
               (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !if(Is2Addr,
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
               [(set VR128:$dst,
                     (OutVT (OpNode VR128:$src1,
                                    (bc_frag (memopv2i64 addr:$src2)))))]>,
               Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse2_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
                       ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
  def Yrr : PDI<opc, MRMSrcReg,
                (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VR256:$dst,
                      (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
                Sched<[WriteShuffle]>;
  def Yrm : PDI<opc, MRMSrcMem,
                (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
                !strconcat(OpcodeStr,
                           "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set VR256:$dst,
                      (OutVT (OpNode VR256:$src1,
                                     (bc_frag (memopv4i64 addr:$src2)))))]>,
                Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse4_pack<bits<8> opc, string OpcodeStr, ValueType OutVT,
                     ValueType ArgVT, SDNode OpNode, PatFrag bc_frag,
                     bit Is2Addr = 1> {
  def rr : SS48I<opc, MRMSrcReg,
                 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                       (OutVT (OpNode (ArgVT VR128:$src1), VR128:$src2)))]>,
                 Sched<[WriteShuffle]>;
  def rm : SS48I<opc, MRMSrcMem,
                 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                       (OutVT (OpNode VR128:$src1,
                                      (bc_frag (memopv2i64 addr:$src2)))))]>,
                 Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse4_pack_y<bits<8> opc, string OpcodeStr, ValueType OutVT,
                       ValueType ArgVT, SDNode OpNode, PatFrag bc_frag> {
  def Yrr : SS48I<opc, MRMSrcReg,
                  (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst,
                        (OutVT (OpNode (ArgVT VR256:$src1), VR256:$src2)))]>,
                  Sched<[WriteShuffle]>;
  def Yrm : SS48I<opc, MRMSrcMem,
                  (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
                  !strconcat(OpcodeStr,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set VR256:$dst,
                        (OutVT (OpNode VR256:$src1,
                                       (bc_frag (memopv4i64 addr:$src2)))))]>,
                  Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX] in {
  defm VPACKSSWB : sse2_pack<0x63, "vpacksswb", v16i8, v8i16, X86Packss,
                             bc_v8i16, 0>, VEX_4V;
  defm VPACKSSDW : sse2_pack<0x6B, "vpackssdw", v8i16, v4i32, X86Packss,
                             bc_v4i32, 0>, VEX_4V;

  defm VPACKUSWB : sse2_pack<0x67, "vpackuswb", v16i8, v8i16, X86Packus,
                             bc_v8i16, 0>, VEX_4V;
  defm VPACKUSDW : sse4_pack<0x2B, "vpackusdw", v8i16, v4i32, X86Packus,
                             bc_v4i32, 0>, VEX_4V;
}

let Predicates = [HasAVX2] in {
  defm VPACKSSWB : sse2_pack_y<0x63, "vpacksswb", v32i8, v16i16, X86Packss,
                               bc_v16i16>, VEX_4V, VEX_L;
  defm VPACKSSDW : sse2_pack_y<0x6B, "vpackssdw", v16i16, v8i32, X86Packss,
                               bc_v8i32>, VEX_4V, VEX_L;

  defm VPACKUSWB : sse2_pack_y<0x67, "vpackuswb", v32i8, v16i16, X86Packus,
                               bc_v16i16>, VEX_4V, VEX_L;
  defm VPACKUSDW : sse4_pack_y<0x2B, "vpackusdw", v16i16, v8i32, X86Packus,
                               bc_v8i32>, VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PACKSSWB : sse2_pack<0x63, "packsswb", v16i8, v8i16, X86Packss,
                            bc_v8i16>;
  defm PACKSSDW : sse2_pack<0x6B, "packssdw", v8i16, v4i32, X86Packss,
                            bc_v4i32>;

  defm PACKUSWB : sse2_pack<0x67, "packuswb", v16i8, v8i16, X86Packus,
                            bc_v8i16>;

  let Predicates = [HasSSE41] in
  defm PACKUSDW : sse4_pack<0x2B, "packusdw", v8i16, v4i32, X86Packus,
                            bc_v4i32>;
}
} // ExeDomain = SSEPackedInt
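// Saturation sketch: the pack instructions narrow each element and clamp it
// to the destination range rather than truncating. For packsswb an i16 of
// 300 becomes 127 and -300 becomes -128; for packuswb the same inputs become
// 255 and 0. The two source operands supply the low and high halves of the
// packed result, respectively.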
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))],
      IIC_SSE_UNPCK>, Sched<[WriteShuffle]>;
  def rm : PDI<opc, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (OpNode VR128:$src1,
                                (bc_frag (memopv2i64 addr:$src2))))],
      IIC_SSE_UNPCK>,
      Sched<[WriteShuffleLd, ReadAfterLd]>;
}

multiclass sse2_unpack_y<bits<8> opc, string OpcodeStr, ValueType vt,
                         SDNode OpNode, PatFrag bc_frag> {
  def Yrr : PDI<opc, MRMSrcReg,
      (outs VR256:$dst), (ins VR256:$src1, VR256:$src2),
      !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      [(set VR256:$dst, (vt (OpNode VR256:$src1, VR256:$src2)))]>,
      Sched<[WriteShuffle]>;
  def Yrm : PDI<opc, MRMSrcMem,
      (outs VR256:$dst), (ins VR256:$src1, i256mem:$src2),
      !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
      [(set VR256:$dst, (OpNode VR256:$src1,
                                (bc_frag (memopv4i64 addr:$src2))))]>,
      Sched<[WriteShuffleLd, ReadAfterLd]>;
}

let Predicates = [HasAVX] in {
  defm VPUNPCKLBW  : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Unpckl,
                                 bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKLWD  : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Unpckl,
                                 bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKLDQ  : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Unpckl,
                                 bc_v4i32, 0>, VEX_4V;
  defm VPUNPCKLQDQ : sse2_unpack<0x6C, "vpunpcklqdq", v2i64, X86Unpckl,
                                 bc_v2i64, 0>, VEX_4V;

  defm VPUNPCKHBW  : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Unpckh,
                                 bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKHWD  : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Unpckh,
                                 bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKHDQ  : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Unpckh,
                                 bc_v4i32, 0>, VEX_4V;
  defm VPUNPCKHQDQ : sse2_unpack<0x6D, "vpunpckhqdq", v2i64, X86Unpckh,
                                 bc_v2i64, 0>, VEX_4V;
}

let Predicates = [HasAVX2] in {
  defm VPUNPCKLBW  : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl,
                                   bc_v32i8>, VEX_4V, VEX_L;
  defm VPUNPCKLWD  : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl,
                                   bc_v16i16>, VEX_4V, VEX_L;
  defm VPUNPCKLDQ  : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl,
                                   bc_v8i32>, VEX_4V, VEX_L;
  defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl,
                                   bc_v4i64>, VEX_4V, VEX_L;

  defm VPUNPCKHBW  : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh,
                                   bc_v32i8>, VEX_4V, VEX_L;
  defm VPUNPCKHWD  : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh,
                                   bc_v16i16>, VEX_4V, VEX_L;
  defm VPUNPCKHDQ  : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh,
                                   bc_v8i32>, VEX_4V, VEX_L;
  defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh,
                                   bc_v4i64>, VEX_4V, VEX_L;
}

let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW  : sse2_unpack<0x60, "punpcklbw", v16i8, X86Unpckl,
                                bc_v16i8>;
  defm PUNPCKLWD  : sse2_unpack<0x61, "punpcklwd", v8i16, X86Unpckl,
                                bc_v8i16>;
  defm PUNPCKLDQ  : sse2_unpack<0x62, "punpckldq", v4i32, X86Unpckl,
                                bc_v4i32>;
  defm PUNPCKLQDQ : sse2_unpack<0x6C, "punpcklqdq", v2i64, X86Unpckl,
                                bc_v2i64>;

  defm PUNPCKHBW  : sse2_unpack<0x68, "punpckhbw", v16i8, X86Unpckh,
                                bc_v16i8>;
  defm PUNPCKHWD  : sse2_unpack<0x69, "punpckhwd", v8i16, X86Unpckh,
                                bc_v8i16>;
  defm PUNPCKHDQ  : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Unpckh,
                                bc_v4i32>;
  defm PUNPCKHQDQ : sse2_unpack<0x6D, "punpckhqdq", v2i64, X86Unpckh,
                                bc_v2i64>;
}
} // ExeDomain = SSEPackedInt
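// Interleave sketch: punpcklbw on a = {a0..a15} and b = {b0..b15} produces
// {a0,b0,a1,b1,...,a7,b7}; punpckhbw does the same for the high eight bytes.
// A common widening idiom is punpcklbw against an all-zero register, which
// zero-extends eight u8 lanes to u16 in a single shuffle.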
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32orGR64:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32orGR64:$src2, imm:$src3))],
       IIC_SSE_PINSRW>, Sched<[WriteShuffle]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
       (outs VR128:$dst), (ins VR128:$src1,
        i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))], IIC_SSE_PINSRW>,
       Sched<[WriteShuffleLd, ReadAfterLd]>;
}

// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                      imm:$src2))]>,
                    PD, VEX, Sched<[WriteShuffle]>;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32orGR64:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32orGR64:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                       imm:$src2))],
                     IIC_SSE_PEXTRW>,
                     Sched<[WriteShuffleLd, ReadAfterLd]>;

// Insert
let Predicates = [HasAVX] in
defm VPINSRW : sse2_pinsrw<0>, PD, VEX_4V;

let Predicates = [UseSSE2], Constraints = "$src1 = $dst" in
defm PINSRW : sse2_pinsrw, PD;

} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt, SchedRW = [WriteVecLogic] in {

def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
                       (ins VR128:$src),
                       "pmovmskb\t{$src, $dst|$dst, $src}",
                       [(set GR32orGR64:$dst,
                         (int_x86_sse2_pmovmskb_128 VR128:$src))],
                       IIC_SSE_MOVMSK>, VEX;

let Predicates = [HasAVX2] in {
def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst),
                        (ins VR256:$src),
                        "pmovmskb\t{$src, $dst|$dst, $src}",
                        [(set GR32orGR64:$dst,
                          (int_x86_avx2_pmovmskb VR256:$src))]>,
                        VEX, VEX_L;
}

def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32orGR64:$dst), (ins VR128:$src),
                     "pmovmskb\t{$src, $dst|$dst, $src}",
                     [(set GR32orGR64:$dst,
                       (int_x86_sse2_pmovmskb_128 VR128:$src))],
                     IIC_SSE_MOVMSK>;

} // ExeDomain = SSEPackedInt
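// Usage sketch: pmovmskb gathers the sign bit of each byte into the low bits
// of a GPR (16 bits for xmm, 32 for ymm); _mm_movemask_epi8 maps onto it.
// The classic null-byte scan pairs it with a byte compare:
//   pcmpeqb  %xmm1, %xmm0   ; 0xFF in each lane equal to the needle
//   pmovmskb %xmm0, %eax    ; one mask bit per lane
//   bsf      %eax, %eax     ; index of the first match, if any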
//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt, SchedRW = [WriteStore] in {

let Uses = [EDI], Predicates = [HasAVX,Not64BitMode] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
                       (ins VR128:$src, VR128:$mask),
                       "maskmovdqu\t{$mask, $src|$src, $mask}",
                       [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask,
                                                  EDI)],
                       IIC_SSE_MASKMOV>, VEX;
let Uses = [RDI], Predicates = [HasAVX,In64BitMode] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
                         (ins VR128:$src, VR128:$mask),
                         "maskmovdqu\t{$mask, $src|$src, $mask}",
                         [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask,
                                                    RDI)],
                         IIC_SSE_MASKMOV>, VEX;

let Uses = [EDI], Predicates = [UseSSE2,Not64BitMode] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)],
                     IIC_SSE_MASKMOV>;
let Uses = [RDI], Predicates = [UseSSE2,In64BitMode] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                       "maskmovdqu\t{$mask, $src|$src, $mask}",
                       [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask,
                                                  RDI)],
                       IIC_SSE_MASKMOV>;

} // ExeDomain = SSEPackedInt
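// Semantics sketch: maskmovdqu stores only the bytes of $src whose
// corresponding $mask byte has its most significant bit set, to the address
// implicitly held in EDI/RDI - hence the Uses lists and the two mode-specific
// defs above. _mm_maskmoveu_si128 is the matching intrinsic, and the store
// carries a non-temporal hint, so it behaves like the movnt stores.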
//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector GR32:$src)))],
                        IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                        IIC_SSE_MOVDQ>,
                        VEX, Sched<[WriteLoad]>;
def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                          "movq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst,
                            (v2i64 (scalar_to_vector GR64:$src)))],
                          IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
let isCodeGenOnly = 1 in
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                         "movq\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert GR64:$src))],
                         IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;

def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))],
                      IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
def MOVDI2PDIrm : S2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                      IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
def MOV64toPQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))],
                        IIC_SSE_MOVDQ>, Sched<[WriteMove]>;
let isCodeGenOnly = 1 in
def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))],
                       IIC_SSE_MOVDQ>, Sched<[WriteMove]>;

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
let isCodeGenOnly = 1 in {
  def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (bitconvert GR32:$src))],
                         IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;

  def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                         "movd\t{$src, $dst|$dst, $src}",
                         [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
                         IIC_SSE_MOVDQ>,
                         VEX, Sched<[WriteLoad]>;
  def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert GR32:$src))],
                       IIC_SSE_MOVDQ>, Sched<[WriteMove]>;

  def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))],
                       IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
}

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Doubleword Int
//
def VMOVPDI2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                                         (iPTR 0)))],
                        IIC_SSE_MOVD_ToGP>, VEX,
                        Sched<[WriteMove]>;
def VMOVPDI2DImr : VS2I<0x7E, MRMDestMem, (outs),
                        (ins i32mem:$dst, VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(store (i32 (vector_extract (v4i32 VR128:$src),
                                                     (iPTR 0))), addr:$dst)],
                        IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
def MOVPDI2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                                       (iPTR 0)))],
                      IIC_SSE_MOVD_ToGP>,
                      Sched<[WriteMove]>;
def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                                   (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOVDQ>, Sched<[WriteStore]>;

def : Pat<(v8i32 (X86Vinsert (v8i32 immAllZerosV), GR32:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;

def : Pat<(v4i64 (X86Vinsert (bc_v4i64 (v8i32 immAllZerosV)), GR64:$src2,
                             (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;

def : Pat<(v8i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src2), sub_xmm)>;

def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOV64toPQIrr GR64:$src2), sub_xmm)>;

//===---------------------------------------------------------------------===//
// Move Packed Quadword Int first element to Quadword Int
5082 //
5083 let SchedRW = [WriteMove] in {
5084 def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
5085 "movq\t{$src, $dst|$dst, $src}",
5086 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
5087 (iPTR 0)))],
5088 IIC_SSE_MOVD_ToGP>,
5089 VEX;
5091 def MOVPQIto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
5092 "mov{d|q}\t{$src, $dst|$dst, $src}",
5093 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
5094 (iPTR 0)))],
5095 IIC_SSE_MOVD_ToGP>;
5096 } //SchedRW
5098 //===---------------------------------------------------------------------===//
5099 // Bitcast FR64 <-> GR64
5100 //
5101 let isCodeGenOnly = 1 in {
5102 let Predicates = [UseAVX] in
5103 def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
5104 "movq\t{$src, $dst|$dst, $src}",
5105 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
5106 VEX, Sched<[WriteLoad]>;
5107 def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
5108 "movq\t{$src, $dst|$dst, $src}",
5109 [(set GR64:$dst, (bitconvert FR64:$src))],
5110 IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
5111 def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
5112 "movq\t{$src, $dst|$dst, $src}",
5113 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
5114 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
5116 def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
5117 "movq\t{$src, $dst|$dst, $src}",
5118 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))],
5119 IIC_SSE_MOVDQ>, Sched<[WriteLoad]>;
5120 def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
5121 "mov{d|q}\t{$src, $dst|$dst, $src}",
5122 [(set GR64:$dst, (bitconvert FR64:$src))],
5123 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
5124 def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
5125 "movq\t{$src, $dst|$dst, $src}",
5126 [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
5127 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
5128 }
5130 //===---------------------------------------------------------------------===//
5131 // Move Scalar Single to Double Int
5132 //
5133 let isCodeGenOnly = 1 in {
5134 def VMOVSS2DIrr : VS2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
5135 "movd\t{$src, $dst|$dst, $src}",
5136 [(set GR32:$dst, (bitconvert FR32:$src))],
5137 IIC_SSE_MOVD_ToGP>, VEX, Sched<[WriteMove]>;
5138 def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
5139 "movd\t{$src, $dst|$dst, $src}",
5140 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
5141 IIC_SSE_MOVDQ>, VEX, Sched<[WriteStore]>;
5142 def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
5143 "movd\t{$src, $dst|$dst, $src}",
5144 [(set GR32:$dst, (bitconvert FR32:$src))],
5145 IIC_SSE_MOVD_ToGP>, Sched<[WriteMove]>;
5146 def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
5147 "movd\t{$src, $dst|$dst, $src}",
5148 [(store (i32 (bitconvert FR32:$src)), addr:$dst)],
5149 IIC_SSE_MOVDQ>, Sched<[WriteStore]>;
5150 }
5152 //===---------------------------------------------------------------------===//
5153 // Patterns and instructions to describe movd/movq to XMM register zero-extends
5154 //
5155 let isCodeGenOnly = 1, SchedRW = [WriteMove] in {
5156 let AddedComplexity = 15 in {
5157 def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
5158 "movq\t{$src, $dst|$dst, $src}", // X86-64 only
5159 [(set VR128:$dst, (v2i64 (X86vzmovl
5160 (v2i64 (scalar_to_vector GR64:$src)))))],
5161 IIC_SSE_MOVDQ>,
5162 VEX, VEX_W;
5163 def MOVZQI2PQIrr : RS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
5164 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
5165 [(set VR128:$dst, (v2i64 (X86vzmovl
5166 (v2i64 (scalar_to_vector GR64:$src)))))],
5167 IIC_SSE_MOVDQ>;
5168 }
5169 } // isCodeGenOnly, SchedRW
5171 let Predicates = [UseAVX] in {
5172 let AddedComplexity = 15 in
5173 def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
5174 (VMOVDI2PDIrr GR32:$src)>;
  // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
  let AddedComplexity = 20 in {
  def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
            (VMOVDI2PDIrm addr:$src)>;
  def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
            (VMOVDI2PDIrm addr:$src)>;
  def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
            (VMOVDI2PDIrm addr:$src)>;
  }
  // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                               (v4i32 (scalar_to_vector GR32:$src)),
                               (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVDI2PDIrr GR32:$src), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                               (v2i64 (scalar_to_vector GR64:$src)),
                               (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
}

let Predicates = [UseSSE2] in {
  let AddedComplexity = 15 in
  def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
            (MOVDI2PDIrr GR32:$src)>;

  let AddedComplexity = 20 in {
  def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
            (MOVDI2PDIrm addr:$src)>;
  def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
            (MOVDI2PDIrm addr:$src)>;
  def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
            (MOVDI2PDIrm addr:$src)>;
  }
}
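// X86vzmovl sketch: the node moves a scalar into lane 0 and zeroes every
// remaining lane, which is exactly what movd/movq from a GPR or memory do.
// For example _mm_cvtsi32_si128(x) builds {x,0,0,0} and selects MOVDI2PDIrr
// (or VMOVDI2PDIrr under AVX) through the patterns above.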
// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
// Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int
//

let SchedRW = [WriteLoad] in {
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                     VEX, Requires<[UseAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))],
                    IIC_SSE_MOVDQ>, XS,
                    Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix
} // SchedRW

//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//
let SchedRW = [WriteStore] in {
def VMOVPQI2QImr : VS2I<0xD6, MRMDestMem, (outs),
                        (ins i64mem:$dst, VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(store (i64 (vector_extract (v2i64 VR128:$src),
                                                     (iPTR 0))), addr:$dst)],
                        IIC_SSE_MOVDQ>, VEX;
def MOVPQI2QImr : S2I<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                                   (iPTR 0))), addr:$dst)],
                      IIC_SSE_MOVDQ>;
} // SchedRW

// For disassembler only
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteVecLogic] in {
def VMOVPQI2QIrr : VS2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}", [],
                        IIC_SSE_MOVQ_RR>, VEX;
def MOVPQI2QIrr : S2I<0xD6, MRMDestReg, (outs VR128:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVQ_RR>;
}

//===---------------------------------------------------------------------===//
5270 // Store / copy the lower 64 bits of an XMM register.
5271 //
5272 let Predicates = [UseAVX] in
5273 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5274 (VMOVPQI2QImr addr:$dst, VR128:$src)>;
5275 let Predicates = [UseSSE2] in
5276 def : Pat<(int_x86_sse2_storel_dq addr:$dst, VR128:$src),
5277 (MOVPQI2QImr addr:$dst, VR128:$src)>;
5279 let isCodeGenOnly = 1, AddedComplexity = 20 in {
5280 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5281 "vmovq\t{$src, $dst|$dst, $src}",
5282 [(set VR128:$dst,
5283 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5284 (loadi64 addr:$src))))))],
5285 IIC_SSE_MOVDQ>,
5286 XS, VEX, Requires<[UseAVX]>, Sched<[WriteLoad]>;
5288 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
5289 "movq\t{$src, $dst|$dst, $src}",
5290 [(set VR128:$dst,
5291 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
5292 (loadi64 addr:$src))))))],
5293 IIC_SSE_MOVDQ>,
5294 XS, Requires<[UseSSE2]>, Sched<[WriteLoad]>;
5295 }
5297 let Predicates = [UseAVX], AddedComplexity = 20 in {
5298 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5299 (VMOVZQI2PQIrm addr:$src)>;
5300 def : Pat<(v2i64 (X86vzload addr:$src)),
5301 (VMOVZQI2PQIrm addr:$src)>;
5302 }
5304 let Predicates = [UseSSE2], AddedComplexity = 20 in {
5305 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
5306 (MOVZQI2PQIrm addr:$src)>;
5307 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
5308 }
5310 let Predicates = [HasAVX] in {
5311 def : Pat<(v4i64 (alignedX86vzload addr:$src)),
5312 (SUBREG_TO_REG (i32 0), (VMOVAPSrm addr:$src), sub_xmm)>;
5313 def : Pat<(v4i64 (X86vzload addr:$src)),
5314 (SUBREG_TO_REG (i32 0), (VMOVUPSrm addr:$src), sub_xmm)>;
5315 }
5317 //===---------------------------------------------------------------------===//
5318 // Moving from XMM to XMM, clearing the upper 64 bits. Note that there is a
5319 // bug in the IA-32 documentation: movq xmm1, xmm2 does clear the high bits.
5320 //
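// E.g. (illustrative): "movq %xmm1, %xmm0" yields xmm0 = { xmm1[63:0], 0 },
// which is what the X86vzmovl register patterns below rely on.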
5321 let SchedRW = [WriteVecLogic] in {
5322 let AddedComplexity = 15 in
5323 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5324 "vmovq\t{$src, $dst|$dst, $src}",
5325 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5326 IIC_SSE_MOVQ_RR>,
5327 XS, VEX, Requires<[UseAVX]>;
5328 let AddedComplexity = 15 in
5329 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5330 "movq\t{$src, $dst|$dst, $src}",
5331 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))],
5332 IIC_SSE_MOVQ_RR>,
5333 XS, Requires<[UseSSE2]>;
5334 } // SchedRW
5336 let isCodeGenOnly = 1, SchedRW = [WriteVecLogicLd] in {
5337 let AddedComplexity = 20 in
5338 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5339 "vmovq\t{$src, $dst|$dst, $src}",
5340 [(set VR128:$dst, (v2i64 (X86vzmovl
5341 (loadv2i64 addr:$src))))],
5342 IIC_SSE_MOVDQ>,
5343 XS, VEX, Requires<[UseAVX]>;
5344 let AddedComplexity = 20 in {
5345 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5346 "movq\t{$src, $dst|$dst, $src}",
5347 [(set VR128:$dst, (v2i64 (X86vzmovl
5348 (loadv2i64 addr:$src))))],
5349 IIC_SSE_MOVDQ>,
5350 XS, Requires<[UseSSE2]>;
5351 }
5352 } // isCodeGenOnly, SchedRW
5354 let AddedComplexity = 20 in {
5355 let Predicates = [UseAVX] in {
5356 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5357 (VMOVZPQILo2PQIrr VR128:$src)>;
5358 }
5359 let Predicates = [UseSSE2] in {
5360 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
5361 (MOVZPQILo2PQIrr VR128:$src)>;
5362 }
5363 }
5365 //===---------------------------------------------------------------------===//
5366 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
5367 //===---------------------------------------------------------------------===//
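// Informally, for a source { s0, s1, s2, s3 }:
//   movshdup: result = { s1, s1, s3, s3 }   (duplicate the odd elements)
//   movsldup: result = { s0, s0, s2, s2 }   (duplicate the even elements)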
5368 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
5369 ValueType vt, RegisterClass RC, PatFrag mem_frag,
5370 X86MemOperand x86memop> {
5371 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
5372 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5373 [(set RC:$dst, (vt (OpNode RC:$src)))],
5374 IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5375 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5376 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5377 [(set RC:$dst, (OpNode (mem_frag addr:$src)))],
5378 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5379 }
5381 let Predicates = [HasAVX] in {
5382 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5383 v4f32, VR128, loadv4f32, f128mem>, VEX;
5384 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5385 v4f32, VR128, loadv4f32, f128mem>, VEX;
5386 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
5387 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5388 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
5389 v8f32, VR256, loadv8f32, f256mem>, VEX, VEX_L;
5390 }
5391 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
5392 memopv4f32, f128mem>;
5393 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
5394 memopv4f32, f128mem>;
5396 let Predicates = [HasAVX] in {
5397 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5398 (VMOVSHDUPrr VR128:$src)>;
5399 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (loadv2i64 addr:$src)))),
5400 (VMOVSHDUPrm addr:$src)>;
5401 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5402 (VMOVSLDUPrr VR128:$src)>;
5403 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (loadv2i64 addr:$src)))),
5404 (VMOVSLDUPrm addr:$src)>;
5405 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
5406 (VMOVSHDUPYrr VR256:$src)>;
5407 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (loadv4i64 addr:$src)))),
5408 (VMOVSHDUPYrm addr:$src)>;
5409 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
5410 (VMOVSLDUPYrr VR256:$src)>;
5411 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (loadv4i64 addr:$src)))),
5412 (VMOVSLDUPYrm addr:$src)>;
5413 }
5415 let Predicates = [UseSSE3] in {
5416 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5417 (MOVSHDUPrr VR128:$src)>;
5418 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
5419 (MOVSHDUPrm addr:$src)>;
5420 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5421 (MOVSLDUPrr VR128:$src)>;
5422 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
5423 (MOVSLDUPrm addr:$src)>;
5424 }
5426 //===---------------------------------------------------------------------===//
5427 // SSE3 - Replicate Double FP - MOVDDUP
5428 //===---------------------------------------------------------------------===//
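// Informally, movddup duplicates the low double: for a source { d0, d1 } the
// result is { d0, d0 }. The memory form reads only 64 bits, which is why the
// OptForSize broadcast patterns below can be lowered to it.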
5430 multiclass sse3_replicate_dfp<string OpcodeStr> {
5431 let hasSideEffects = 0 in
5432 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
5433 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5434 [], IIC_SSE_MOV_LH>, Sched<[WriteFShuffle]>;
5435 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
5436 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5437 [(set VR128:$dst,
5438 (v2f64 (X86Movddup
5439 (scalar_to_vector (loadf64 addr:$src)))))],
5440 IIC_SSE_MOV_LH>, Sched<[WriteLoad]>;
5441 }
5443 // FIXME: Merge with the class above once there are patterns for the ymm version
5444 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
5445 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
5446 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5447 [(set VR256:$dst, (v4f64 (X86Movddup VR256:$src)))]>,
5448 Sched<[WriteFShuffle]>;
5449 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
5450 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5451 [(set VR256:$dst,
5452 (v4f64 (X86Movddup
5453 (scalar_to_vector (loadf64 addr:$src)))))]>,
5454 Sched<[WriteLoad]>;
5455 }
5457 let Predicates = [HasAVX] in {
5458 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
5459 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L;
5460 }
5462 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
5464 let Predicates = [HasAVX] in {
5465   def : Pat<(X86Movddup (loadv2f64 addr:$src)),
5466             (VMOVDDUPrm addr:$src)>;
5467   def : Pat<(X86Movddup (bc_v2f64 (loadv4f32 addr:$src))),
5468             (VMOVDDUPrm addr:$src)>;
5469   def : Pat<(X86Movddup (bc_v2f64 (loadv2i64 addr:$src))),
5470             (VMOVDDUPrm addr:$src)>;
5471   def : Pat<(X86Movddup (bc_v2f64
5472                              (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5473             (VMOVDDUPrm addr:$src)>;
5475 // 256-bit version
5476 def : Pat<(X86Movddup (loadv4f64 addr:$src)),
5477 (VMOVDDUPYrm addr:$src)>;
5478 def : Pat<(X86Movddup (loadv4i64 addr:$src)),
5479 (VMOVDDUPYrm addr:$src)>;
5480 def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
5481 (VMOVDDUPYrm addr:$src)>;
5482 def : Pat<(X86Movddup (v4i64 VR256:$src)),
5483 (VMOVDDUPYrr VR256:$src)>;
5484 }
5486 let Predicates = [UseAVX, OptForSize] in {
5487 def : Pat<(v2f64 (X86VBroadcast (loadf64 addr:$src))),
5488 (VMOVDDUPrm addr:$src)>;
5489 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
5490 (VMOVDDUPrm addr:$src)>;
5491 }
5493 let Predicates = [UseSSE3] in {
5494 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5495 (MOVDDUPrm addr:$src)>;
5496 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5497 (MOVDDUPrm addr:$src)>;
5498 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5499 (MOVDDUPrm addr:$src)>;
5500 def : Pat<(X86Movddup (bc_v2f64
5501 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5502 (MOVDDUPrm addr:$src)>;
5503 }
5505 //===---------------------------------------------------------------------===//
5506 // SSE3 - Move Unaligned Integer
5507 //===---------------------------------------------------------------------===//
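// lddqu behaves like movdqu but may over-read and perform two aligned loads
// internally, which can be faster when the data crosses a cache-line
// boundary; because the bytes actually read may differ, it is only reachable
// through the intrinsic here.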
5509 let SchedRW = [WriteLoad] in {
5510 let Predicates = [HasAVX] in {
5511 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5512 "vlddqu\t{$src, $dst|$dst, $src}",
5513 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
5514 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
5515 "vlddqu\t{$src, $dst|$dst, $src}",
5516 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
5517 VEX, VEX_L;
5518 }
5519 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5520 "lddqu\t{$src, $dst|$dst, $src}",
5521 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))],
5522 IIC_SSE_LDDQU>;
5523 }
5525 //===---------------------------------------------------------------------===//
5526 // SSE3 - Arithmetic
5527 //===---------------------------------------------------------------------===//
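// Informally, addsub alternates: r[i] = a[i] - b[i] for even i and
// r[i] = a[i] + b[i] for odd i, e.g. for addsubps:
//   r = { a0-b0, a1+b1, a2-b2, a3+b3 }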
5529 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
5530 X86MemOperand x86memop, OpndItins itins,
5531 bit Is2Addr = 1> {
5532 def rr : I<0xD0, MRMSrcReg,
5533 (outs RC:$dst), (ins RC:$src1, RC:$src2),
5534 !if(Is2Addr,
5535 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5536 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5537 [(set RC:$dst, (Int RC:$src1, RC:$src2))], itins.rr>,
5538 Sched<[itins.Sched]>;
5539 def rm : I<0xD0, MRMSrcMem,
5540 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5541 !if(Is2Addr,
5542 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5543 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5544        [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))], itins.rm>,
5545 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5546 }
5548 let Predicates = [HasAVX] in {
5549 let ExeDomain = SSEPackedSingle in {
5550 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
5551 f128mem, SSE_ALU_F32P, 0>, XD, VEX_4V;
5552 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
5553 f256mem, SSE_ALU_F32P, 0>, XD, VEX_4V, VEX_L;
5554 }
5555 let ExeDomain = SSEPackedDouble in {
5556 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
5557 f128mem, SSE_ALU_F64P, 0>, PD, VEX_4V;
5558 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
5559 f256mem, SSE_ALU_F64P, 0>, PD, VEX_4V, VEX_L;
5560 }
5561 }
5562 let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in {
5563 let ExeDomain = SSEPackedSingle in
5564 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
5565 f128mem, SSE_ALU_F32P>, XD;
5566 let ExeDomain = SSEPackedDouble in
5567 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
5568 f128mem, SSE_ALU_F64P>, PD;
5569 }
5571 // Patterns used to select 'addsub' instructions.
5572 let Predicates = [HasAVX] in {
5573 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5574 (VADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5575 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 (memop addr:$rhs)))),
5576 (VADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5577 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5578 (VADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5579 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 (memop addr:$rhs)))),
5580 (VADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5582 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 VR256:$rhs))),
5583 (VADDSUBPSYrr VR256:$lhs, VR256:$rhs)>;
5584 def : Pat<(v8f32 (X86Addsub (v8f32 VR256:$lhs), (v8f32 (memop addr:$rhs)))),
5585 (VADDSUBPSYrm VR256:$lhs, f256mem:$rhs)>;
5586 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 VR256:$rhs))),
5587 (VADDSUBPDYrr VR256:$lhs, VR256:$rhs)>;
5588 def : Pat<(v4f64 (X86Addsub (v4f64 VR256:$lhs), (v4f64 (memop addr:$rhs)))),
5589 (VADDSUBPDYrm VR256:$lhs, f256mem:$rhs)>;
5590 }
5592 let Predicates = [UseSSE3] in {
5593 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 VR128:$rhs))),
5594 (ADDSUBPSrr VR128:$lhs, VR128:$rhs)>;
5595 def : Pat<(v4f32 (X86Addsub (v4f32 VR128:$lhs), (v4f32 (memop addr:$rhs)))),
5596 (ADDSUBPSrm VR128:$lhs, f128mem:$rhs)>;
5597 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 VR128:$rhs))),
5598 (ADDSUBPDrr VR128:$lhs, VR128:$rhs)>;
5599 def : Pat<(v2f64 (X86Addsub (v2f64 VR128:$lhs), (v2f64 (memop addr:$rhs)))),
5600 (ADDSUBPDrm VR128:$lhs, f128mem:$rhs)>;
5601 }
5603 //===---------------------------------------------------------------------===//
5604 // SSE3 Instructions
5605 //===---------------------------------------------------------------------===//
5607 // Horizontal ops
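// Informally, the horizontal ops reduce adjacent pairs within each source,
// e.g. haddps: r = { a0+a1, a2+a3, b0+b1, b2+b3 }, and hsubps computes the
// corresponding pairwise differences.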
5608 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5609 X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
5610 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5611 !if(Is2Addr,
5612 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5613 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5614 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5615 Sched<[WriteFAdd]>;
5617 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5618 !if(Is2Addr,
5619 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5620 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5621 [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
5622 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5623 }
5624 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
5625 X86MemOperand x86memop, SDNode OpNode, bit Is2Addr = 1> {
5626 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
5627 !if(Is2Addr,
5628 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5629 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5630 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], IIC_SSE_HADDSUB_RR>,
5631 Sched<[WriteFAdd]>;
5633 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
5634 !if(Is2Addr,
5635 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5636 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5637 [(set RC:$dst, (vt (OpNode RC:$src1, (memop addr:$src2))))],
5638 IIC_SSE_HADDSUB_RM>, Sched<[WriteFAddLd, ReadAfterLd]>;
5639 }
5641 let Predicates = [HasAVX] in {
5642 let ExeDomain = SSEPackedSingle in {
5643 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
5644 X86fhadd, 0>, VEX_4V;
5645 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
5646 X86fhsub, 0>, VEX_4V;
5647 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
5648 X86fhadd, 0>, VEX_4V, VEX_L;
5649 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
5650 X86fhsub, 0>, VEX_4V, VEX_L;
5651 }
5652 let ExeDomain = SSEPackedDouble in {
5653 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
5654 X86fhadd, 0>, VEX_4V;
5655 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
5656 X86fhsub, 0>, VEX_4V;
5657 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
5658 X86fhadd, 0>, VEX_4V, VEX_L;
5659 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
5660 X86fhsub, 0>, VEX_4V, VEX_L;
5661 }
5662 }
5664 let Constraints = "$src1 = $dst" in {
5665 let ExeDomain = SSEPackedSingle in {
5666 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, X86fhadd>;
5667 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, X86fhsub>;
5668 }
5669 let ExeDomain = SSEPackedDouble in {
5670 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, X86fhadd>;
5671 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, X86fhsub>;
5672 }
5673 }
5675 //===---------------------------------------------------------------------===//
5676 // SSSE3 - Packed Absolute Instructions
5677 //===---------------------------------------------------------------------===//
5680 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5681 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
5682 Intrinsic IntId128> {
5683 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5684 (ins VR128:$src),
5685 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5686 [(set VR128:$dst, (IntId128 VR128:$src))], IIC_SSE_PABS_RR>,
5687 Sched<[WriteVecALU]>;
5689 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5690 (ins i128mem:$src),
5691 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5692 [(set VR128:$dst,
5693 (IntId128
5694 (bitconvert (memopv2i64 addr:$src))))], IIC_SSE_PABS_RM>,
5695 Sched<[WriteVecALULd]>;
5696 }
5698 /// SS3I_unop_rm_int_y - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
5699 multiclass SS3I_unop_rm_int_y<bits<8> opc, string OpcodeStr,
5700 Intrinsic IntId256> {
5701 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5702 (ins VR256:$src),
5703 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5704 [(set VR256:$dst, (IntId256 VR256:$src))]>,
5705 Sched<[WriteVecALU]>;
5707 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5708 (ins i256mem:$src),
5709 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5710 [(set VR256:$dst,
5711 (IntId256
5712 (bitconvert (memopv4i64 addr:$src))))]>,
5713 Sched<[WriteVecALULd]>;
5714 }
5716 // Helper fragments to match sext vXi1 to vXiY.
5717 def v16i1sextv16i8 : PatLeaf<(v16i8 (X86pcmpgt (bc_v16i8 (v4i32 immAllZerosV)),
5718 VR128:$src))>;
5719 def v8i1sextv8i16 : PatLeaf<(v8i16 (X86vsrai VR128:$src, (i8 15)))>;
5720 def v4i1sextv4i32 : PatLeaf<(v4i32 (X86vsrai VR128:$src, (i8 31)))>;
5721 def v32i1sextv32i8 : PatLeaf<(v32i8 (X86pcmpgt (bc_v32i8 (v8i32 immAllZerosV)),
5722 VR256:$src))>;
5723 def v16i1sextv16i16: PatLeaf<(v16i16 (X86vsrai VR256:$src, (i8 15)))>;
5724 def v8i1sextv8i32 : PatLeaf<(v8i32 (X86vsrai VR256:$src, (i8 31)))>;
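// These feed the abs(x) selection patterns below, which use the identity
// abs(x) == (x + s) ^ s with s = x >> (width-1) (arithmetic shift): s is
// all-ones for negative x, so the add/xor pair negates x, and s is zero
// otherwise, leaving x unchanged.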
5726 let Predicates = [HasAVX] in {
5727 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb",
5728 int_x86_ssse3_pabs_b_128>, VEX;
5729 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw",
5730 int_x86_ssse3_pabs_w_128>, VEX;
5731 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd",
5732 int_x86_ssse3_pabs_d_128>, VEX;
5734 def : Pat<(xor
5735 (bc_v2i64 (v16i1sextv16i8)),
5736 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5737 (VPABSBrr128 VR128:$src)>;
5738 def : Pat<(xor
5739 (bc_v2i64 (v8i1sextv8i16)),
5740 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5741 (VPABSWrr128 VR128:$src)>;
5742 def : Pat<(xor
5743 (bc_v2i64 (v4i1sextv4i32)),
5744 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5745 (VPABSDrr128 VR128:$src)>;
5746 }
5748 let Predicates = [HasAVX2] in {
5749 defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb",
5750 int_x86_avx2_pabs_b>, VEX, VEX_L;
5751 defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw",
5752 int_x86_avx2_pabs_w>, VEX, VEX_L;
5753 defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd",
5754 int_x86_avx2_pabs_d>, VEX, VEX_L;
5756 def : Pat<(xor
5757 (bc_v4i64 (v32i1sextv32i8)),
5758 (bc_v4i64 (add (v32i8 VR256:$src), (v32i1sextv32i8)))),
5759 (VPABSBrr256 VR256:$src)>;
5760 def : Pat<(xor
5761 (bc_v4i64 (v16i1sextv16i16)),
5762 (bc_v4i64 (add (v16i16 VR256:$src), (v16i1sextv16i16)))),
5763 (VPABSWrr256 VR256:$src)>;
5764 def : Pat<(xor
5765 (bc_v4i64 (v8i1sextv8i32)),
5766 (bc_v4i64 (add (v8i32 VR256:$src), (v8i1sextv8i32)))),
5767 (VPABSDrr256 VR256:$src)>;
5768 }
5770 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb",
5771 int_x86_ssse3_pabs_b_128>;
5772 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw",
5773 int_x86_ssse3_pabs_w_128>;
5774 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd",
5775 int_x86_ssse3_pabs_d_128>;
5777 let Predicates = [HasSSSE3] in {
5778 def : Pat<(xor
5779 (bc_v2i64 (v16i1sextv16i8)),
5780 (bc_v2i64 (add (v16i8 VR128:$src), (v16i1sextv16i8)))),
5781 (PABSBrr128 VR128:$src)>;
5782 def : Pat<(xor
5783 (bc_v2i64 (v8i1sextv8i16)),
5784 (bc_v2i64 (add (v8i16 VR128:$src), (v8i1sextv8i16)))),
5785 (PABSWrr128 VR128:$src)>;
5786 def : Pat<(xor
5787 (bc_v2i64 (v4i1sextv4i32)),
5788 (bc_v2i64 (add (v4i32 VR128:$src), (v4i1sextv4i32)))),
5789 (PABSDrr128 VR128:$src)>;
5790 }
5792 //===---------------------------------------------------------------------===//
5793 // SSSE3 - Packed Binary Operator Instructions
5794 //===---------------------------------------------------------------------===//
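// Informally: pshufb is a byte gather, r[i] = mask[i] & 0x80 ? 0 :
// a[mask[i] & 0xf]; the phadd/phsub ops combine adjacent element pairs
// across both sources; and pmulhrsw returns a rounded high half,
// r = (a * b + 0x4000) >> 15.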
5796 let Sched = WriteVecALU in {
5797 def SSE_PHADDSUBD : OpndItins<
5798 IIC_SSE_PHADDSUBD_RR, IIC_SSE_PHADDSUBD_RM
5799 >;
5800 def SSE_PHADDSUBSW : OpndItins<
5801 IIC_SSE_PHADDSUBSW_RR, IIC_SSE_PHADDSUBSW_RM
5802 >;
5803 def SSE_PHADDSUBW : OpndItins<
5804 IIC_SSE_PHADDSUBW_RR, IIC_SSE_PHADDSUBW_RM
5805 >;
5806 }
5807 let Sched = WriteShuffle in
5808 def SSE_PSHUFB : OpndItins<
5809 IIC_SSE_PSHUFB_RR, IIC_SSE_PSHUFB_RM
5810 >;
5811 let Sched = WriteVecALU in
5812 def SSE_PSIGN : OpndItins<
5813 IIC_SSE_PSIGN_RR, IIC_SSE_PSIGN_RM
5814 >;
5815 let Sched = WriteVecIMul in
5816 def SSE_PMULHRSW : OpndItins<
5817 IIC_SSE_PMULHRSW, IIC_SSE_PMULHRSW
5818 >;
5820 /// SS3I_binop_rm - Simple SSSE3 bin op
5821 multiclass SS3I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5822 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
5823 X86MemOperand x86memop, OpndItins itins,
5824 bit Is2Addr = 1> {
5825 let isCommutable = 1 in
5826 def rr : SS38I<opc, MRMSrcReg, (outs RC:$dst),
5827 (ins RC:$src1, RC:$src2),
5828 !if(Is2Addr,
5829 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5830 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5831 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))], itins.rr>,
5832 Sched<[itins.Sched]>;
5833 def rm : SS38I<opc, MRMSrcMem, (outs RC:$dst),
5834 (ins RC:$src1, x86memop:$src2),
5835 !if(Is2Addr,
5836 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5837 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5838 [(set RC:$dst,
5839 (OpVT (OpNode RC:$src1,
5840 (bitconvert (memop_frag addr:$src2)))))], itins.rm>,
5841 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5842 }
5844 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
5845 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
5846 Intrinsic IntId128, OpndItins itins,
5847 bit Is2Addr = 1> {
5848 let isCommutable = 1 in
5849 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
5850 (ins VR128:$src1, VR128:$src2),
5851 !if(Is2Addr,
5852 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5853 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5854 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5855 Sched<[itins.Sched]>;
5856 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
5857 (ins VR128:$src1, i128mem:$src2),
5858 !if(Is2Addr,
5859 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5860 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5861 [(set VR128:$dst,
5862 (IntId128 VR128:$src1,
5863 (bitconvert (memopv2i64 addr:$src2))))]>,
5864 Sched<[itins.Sched.Folded, ReadAfterLd]>;
5865 }
5867 multiclass SS3I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
5868 Intrinsic IntId256,
5869 X86FoldableSchedWrite Sched> {
5870 let isCommutable = 1 in
5871 def rr256 : SS38I<opc, MRMSrcReg, (outs VR256:$dst),
5872 (ins VR256:$src1, VR256:$src2),
5873 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5874 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
5875 Sched<[Sched]>;
5876 def rm256 : SS38I<opc, MRMSrcMem, (outs VR256:$dst),
5877 (ins VR256:$src1, i256mem:$src2),
5878 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5879 [(set VR256:$dst,
5880 (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
5881 Sched<[Sched.Folded, ReadAfterLd]>;
5882 }
5884 let ImmT = NoImm, Predicates = [HasAVX] in {
5885 let isCommutable = 0 in {
5886 defm VPHADDW : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v8i16, VR128,
5887 loadv2i64, i128mem,
5888 SSE_PHADDSUBW, 0>, VEX_4V;
5889 defm VPHADDD : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v4i32, VR128,
5890 loadv2i64, i128mem,
5891 SSE_PHADDSUBD, 0>, VEX_4V;
5892 defm VPHSUBW : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v8i16, VR128,
5893 loadv2i64, i128mem,
5894 SSE_PHADDSUBW, 0>, VEX_4V;
5895 defm VPHSUBD : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v4i32, VR128,
5896 loadv2i64, i128mem,
5897 SSE_PHADDSUBD, 0>, VEX_4V;
5898 defm VPSIGNB : SS3I_binop_rm<0x08, "vpsignb", X86psign, v16i8, VR128,
5899 loadv2i64, i128mem,
5900 SSE_PSIGN, 0>, VEX_4V;
5901 defm VPSIGNW : SS3I_binop_rm<0x09, "vpsignw", X86psign, v8i16, VR128,
5902 loadv2i64, i128mem,
5903 SSE_PSIGN, 0>, VEX_4V;
5904 defm VPSIGND : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v4i32, VR128,
5905 loadv2i64, i128mem,
5906 SSE_PSIGN, 0>, VEX_4V;
5907 defm VPSHUFB : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v16i8, VR128,
5908 loadv2i64, i128mem,
5909 SSE_PSHUFB, 0>, VEX_4V;
5910 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw",
5911 int_x86_ssse3_phadd_sw_128,
5912 SSE_PHADDSUBSW, 0>, VEX_4V;
5913 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw",
5914 int_x86_ssse3_phsub_sw_128,
5915 SSE_PHADDSUBSW, 0>, VEX_4V;
5916 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw",
5917 int_x86_ssse3_pmadd_ub_sw_128,
5918 SSE_PMADD, 0>, VEX_4V;
5919 }
5920 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw",
5921 int_x86_ssse3_pmul_hr_sw_128,
5922 SSE_PMULHRSW, 0>, VEX_4V;
5923 }
5925 let ImmT = NoImm, Predicates = [HasAVX2] in {
5926 let isCommutable = 0 in {
5927 defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256,
5928 loadv4i64, i256mem,
5929 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5930 defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256,
5931 loadv4i64, i256mem,
5932                                   SSE_PHADDSUBD, 0>, VEX_4V, VEX_L;
5933 defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256,
5934 loadv4i64, i256mem,
5935 SSE_PHADDSUBW, 0>, VEX_4V, VEX_L;
5936 defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256,
5937 loadv4i64, i256mem,
5938                                   SSE_PHADDSUBD, 0>, VEX_4V, VEX_L;
5939 defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256,
5940 loadv4i64, i256mem,
5941                                   SSE_PSIGN, 0>, VEX_4V, VEX_L;
5942 defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256,
5943 loadv4i64, i256mem,
5944                                   SSE_PSIGN, 0>, VEX_4V, VEX_L;
5945 defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256,
5946 loadv4i64, i256mem,
5947                                   SSE_PSIGN, 0>, VEX_4V, VEX_L;
5948 defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256,
5949 loadv4i64, i256mem,
5950 SSE_PSHUFB, 0>, VEX_4V, VEX_L;
5951 defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw",
5952 int_x86_avx2_phadd_sw,
5953 WriteVecALU>, VEX_4V, VEX_L;
5954 defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw",
5955 int_x86_avx2_phsub_sw,
5956 WriteVecALU>, VEX_4V, VEX_L;
5957 defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw",
5958 int_x86_avx2_pmadd_ub_sw,
5959 WriteVecIMul>, VEX_4V, VEX_L;
5960 }
5961 defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw",
5962 int_x86_avx2_pmul_hr_sw,
5963 WriteVecIMul>, VEX_4V, VEX_L;
5964 }
5966 // None of these have i8 immediate fields.
5967 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
5968 let isCommutable = 0 in {
5969 defm PHADDW : SS3I_binop_rm<0x01, "phaddw", X86hadd, v8i16, VR128,
5970 memopv2i64, i128mem, SSE_PHADDSUBW>;
5971 defm PHADDD : SS3I_binop_rm<0x02, "phaddd", X86hadd, v4i32, VR128,
5972 memopv2i64, i128mem, SSE_PHADDSUBD>;
5973 defm PHSUBW : SS3I_binop_rm<0x05, "phsubw", X86hsub, v8i16, VR128,
5974 memopv2i64, i128mem, SSE_PHADDSUBW>;
5975 defm PHSUBD : SS3I_binop_rm<0x06, "phsubd", X86hsub, v4i32, VR128,
5976 memopv2i64, i128mem, SSE_PHADDSUBD>;
5977 defm PSIGNB : SS3I_binop_rm<0x08, "psignb", X86psign, v16i8, VR128,
5978 memopv2i64, i128mem, SSE_PSIGN>;
5979 defm PSIGNW : SS3I_binop_rm<0x09, "psignw", X86psign, v8i16, VR128,
5980 memopv2i64, i128mem, SSE_PSIGN>;
5981 defm PSIGND : SS3I_binop_rm<0x0A, "psignd", X86psign, v4i32, VR128,
5982 memopv2i64, i128mem, SSE_PSIGN>;
5983 defm PSHUFB : SS3I_binop_rm<0x00, "pshufb", X86pshufb, v16i8, VR128,
5984 memopv2i64, i128mem, SSE_PSHUFB>;
5985 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw",
5986 int_x86_ssse3_phadd_sw_128,
5987 SSE_PHADDSUBSW>;
5988 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw",
5989 int_x86_ssse3_phsub_sw_128,
5990 SSE_PHADDSUBSW>;
5991 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw",
5992 int_x86_ssse3_pmadd_ub_sw_128, SSE_PMADD>;
5993 }
5994 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw",
5995 int_x86_ssse3_pmul_hr_sw_128,
5996 SSE_PMULHRSW>;
5997 }
5999 //===---------------------------------------------------------------------===//
6000 // SSSE3 - Packed Align Instruction Patterns
6001 //===---------------------------------------------------------------------===//
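// Informally, palignr forms the 32-byte concatenation dst:src (dst in the
// high half) and extracts 16 bytes starting at the immediate byte offset,
// e.g. "palignr $4, %xmm1, %xmm0" yields bytes 4..19 of xmm0:xmm1 (with
// xmm1 supplying bytes 0..15). Note the patterns below swap the operands
// because X86PAlignr's operand order is the reverse of the instruction's.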
6003 multiclass ssse3_palignr<string asm, bit Is2Addr = 1> {
6004 let hasSideEffects = 0 in {
6005 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
6006 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
6007 !if(Is2Addr,
6008 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6009 !strconcat(asm,
6010 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6011 [], IIC_SSE_PALIGNRR>, Sched<[WriteShuffle]>;
6012 let mayLoad = 1 in
6013 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
6014 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
6015 !if(Is2Addr,
6016 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6017 !strconcat(asm,
6018 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6019 [], IIC_SSE_PALIGNRM>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6020 }
6021 }
6023 multiclass ssse3_palignr_y<string asm, bit Is2Addr = 1> {
6024 let hasSideEffects = 0 in {
6025 def R256rr : SS3AI<0x0F, MRMSrcReg, (outs VR256:$dst),
6026 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
6027 !strconcat(asm,
6028 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6029 []>, Sched<[WriteShuffle]>;
6030 let mayLoad = 1 in
6031 def R256rm : SS3AI<0x0F, MRMSrcMem, (outs VR256:$dst),
6032 (ins VR256:$src1, i256mem:$src2, i8imm:$src3),
6033 !strconcat(asm,
6034 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
6035 []>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6036 }
6037 }
6039 let Predicates = [HasAVX] in
6040 defm VPALIGN : ssse3_palignr<"vpalignr", 0>, VEX_4V;
6041 let Predicates = [HasAVX2] in
6042 defm VPALIGN : ssse3_palignr_y<"vpalignr", 0>, VEX_4V, VEX_L;
6043 let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in
6044 defm PALIGN : ssse3_palignr<"palignr">;
6046 let Predicates = [HasAVX2] in {
6047 def : Pat<(v8i32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6048 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
6049 def : Pat<(v8f32 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6050 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
6051 def : Pat<(v16i16 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6052 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
6053 def : Pat<(v32i8 (X86PAlignr VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6054 (VPALIGNR256rr VR256:$src2, VR256:$src1, imm:$imm)>;
6055 }
6057 let Predicates = [HasAVX] in {
6058 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6059 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6060 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6061 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6062 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6063 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6064 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6065 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6066 }
6068 let Predicates = [UseSSSE3] in {
6069 def : Pat<(v4i32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6070 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6071 def : Pat<(v4f32 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6072 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6073 def : Pat<(v8i16 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6074 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6075 def : Pat<(v16i8 (X86PAlignr VR128:$src1, VR128:$src2, (i8 imm:$imm))),
6076 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
6077 }
6079 //===---------------------------------------------------------------------===//
6080 // SSE3 - Thread synchronization
6081 //===---------------------------------------------------------------------===//
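// For reference: monitor arms address monitoring for the address in RAX/EAX
// (extensions in ECX, hints in EDX); mwait (extensions in ECX, hints in EAX)
// then waits until a store to the monitored range or an interrupt occurs.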
6083 let SchedRW = [WriteSystem] in {
6084 let usesCustomInserter = 1 in {
6085 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
6086 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>,
6087 Requires<[HasSSE3]>;
6088 }
6090 let Uses = [EAX, ECX, EDX] in
6091 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", [], IIC_SSE_MONITOR>,
6092 TB, Requires<[HasSSE3]>;
6093 let Uses = [ECX, EAX] in
6094 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait",
6095 [(int_x86_sse3_mwait ECX, EAX)], IIC_SSE_MWAIT>,
6096 TB, Requires<[HasSSE3]>;
6097 } // SchedRW
6099 def : InstAlias<"mwait\t{%eax, %ecx|ecx, eax}", (MWAITrr)>, Requires<[Not64BitMode]>;
6100 def : InstAlias<"mwait\t{%rax, %rcx|rcx, rax}", (MWAITrr)>, Requires<[In64BitMode]>;
6102 def : InstAlias<"monitor\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORrrr)>,
6103 Requires<[Not64BitMode]>;
6104 def : InstAlias<"monitor\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORrrr)>,
6105 Requires<[In64BitMode]>;
6107 //===----------------------------------------------------------------------===//
6108 // SSE4.1 - Packed Move with Sign/Zero Extend
6109 //===----------------------------------------------------------------------===//
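// Informally, e.g. pmovsxbw sign-extends the low 8 bytes of the source into
// 8 words and pmovzxbw zero-extends them; the memory forms load only the
// bytes actually consumed, hence the i64mem/i32mem/i16mem operands below.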
6111 multiclass SS41I_pmovx_rrrm<bits<8> opc, string OpcodeStr, X86MemOperand MemOp,
6112 RegisterClass OutRC, RegisterClass InRC,
6113 OpndItins itins> {
6114 def rr : SS48I<opc, MRMSrcReg, (outs OutRC:$dst), (ins InRC:$src),
6115 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6116 [], itins.rr>,
6117 Sched<[itins.Sched]>;
6119 def rm : SS48I<opc, MRMSrcMem, (outs OutRC:$dst), (ins MemOp:$src),
6120 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6121 [],
6122 itins.rm>, Sched<[itins.Sched.Folded]>;
6123 }
6125 multiclass SS41I_pmovx_rm_all<bits<8> opc, string OpcodeStr,
6126 X86MemOperand MemOp, X86MemOperand MemYOp,
6127 OpndItins SSEItins, OpndItins AVXItins,
6128 OpndItins AVX2Itins> {
6129 defm NAME : SS41I_pmovx_rrrm<opc, OpcodeStr, MemOp, VR128, VR128, SSEItins>;
6130 let Predicates = [HasAVX] in
6131 defm V#NAME : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemOp,
6132 VR128, VR128, AVXItins>, VEX;
6133 let Predicates = [HasAVX2] in
6134 defm V#NAME#Y : SS41I_pmovx_rrrm<opc, !strconcat("v", OpcodeStr), MemYOp,
6135 VR256, VR128, AVX2Itins>, VEX, VEX_L;
6136 }
6138 multiclass SS41I_pmovx_rm<bits<8> opc, string OpcodeStr,
6139 X86MemOperand MemOp, X86MemOperand MemYOp> {
6140 defm PMOVSX#NAME : SS41I_pmovx_rm_all<opc, !strconcat("pmovsx", OpcodeStr),
6141 MemOp, MemYOp,
6142 SSE_INTALU_ITINS_SHUFF_P,
6143 DEFAULT_ITINS_SHUFFLESCHED,
6144 DEFAULT_ITINS_SHUFFLESCHED>;
6145 defm PMOVZX#NAME : SS41I_pmovx_rm_all<!add(opc, 0x10),
6146 !strconcat("pmovzx", OpcodeStr),
6147 MemOp, MemYOp,
6148 SSE_INTALU_ITINS_SHUFF_P,
6149 DEFAULT_ITINS_SHUFFLESCHED,
6150 DEFAULT_ITINS_SHUFFLESCHED>;
6151 }
6153 defm BW : SS41I_pmovx_rm<0x20, "bw", i64mem, i128mem>;
6154 defm WD : SS41I_pmovx_rm<0x23, "wd", i64mem, i128mem>;
6155 defm DQ : SS41I_pmovx_rm<0x25, "dq", i64mem, i128mem>;
6157 defm BD : SS41I_pmovx_rm<0x21, "bd", i32mem, i64mem>;
6158 defm WQ : SS41I_pmovx_rm<0x24, "wq", i32mem, i64mem>;
6160 defm BQ : SS41I_pmovx_rm<0x22, "bq", i16mem, i32mem>;
6162 // AVX2 Patterns
6163 multiclass SS41I_pmovx_avx2_patterns<string OpcPrefix, SDNode ExtOp> {
6164 // Register-Register patterns
6165 def : Pat<(v16i16 (ExtOp (v16i8 VR128:$src))),
6166 (!cast<I>(OpcPrefix#BWYrr) VR128:$src)>;
6167 def : Pat<(v8i32 (ExtOp (v16i8 VR128:$src))),
6168 (!cast<I>(OpcPrefix#BDYrr) VR128:$src)>;
6169 def : Pat<(v4i64 (ExtOp (v16i8 VR128:$src))),
6170 (!cast<I>(OpcPrefix#BQYrr) VR128:$src)>;
6172 def : Pat<(v8i32 (ExtOp (v8i16 VR128:$src))),
6173 (!cast<I>(OpcPrefix#WDYrr) VR128:$src)>;
6174 def : Pat<(v4i64 (ExtOp (v8i16 VR128:$src))),
6175 (!cast<I>(OpcPrefix#WQYrr) VR128:$src)>;
6177 def : Pat<(v4i64 (ExtOp (v4i32 VR128:$src))),
6178 (!cast<I>(OpcPrefix#DQYrr) VR128:$src)>;
6180 // AVX2 Register-Memory patterns
6181 def : Pat<(v16i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6182 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6183 def : Pat<(v16i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6184 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6185 def : Pat<(v16i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6186 (!cast<I>(OpcPrefix#BWYrm) addr:$src)>;
6190 def : Pat<(v8i32 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6191 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6192 def : Pat<(v8i32 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6193 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6194 def : Pat<(v8i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6195 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6196 def : Pat<(v8i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6197 (!cast<I>(OpcPrefix#BDYrm) addr:$src)>;
6199 def : Pat<(v4i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6200 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6201 def : Pat<(v4i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6202 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6203 def : Pat<(v4i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6204 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6205 def : Pat<(v4i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6206 (!cast<I>(OpcPrefix#BQYrm) addr:$src)>;
6208 def : Pat<(v8i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6209 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6210 def : Pat<(v8i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6211 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6212 def : Pat<(v8i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6213 (!cast<I>(OpcPrefix#WDYrm) addr:$src)>;
6217 def : Pat<(v4i64 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6218 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6219 def : Pat<(v4i64 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6220 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6221 def : Pat<(v4i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6222 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6223 def : Pat<(v4i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6224 (!cast<I>(OpcPrefix#WQYrm) addr:$src)>;
6226 def : Pat<(v4i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6227 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6228 def : Pat<(v4i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6229 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6230 def : Pat<(v4i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6231 (!cast<I>(OpcPrefix#DQYrm) addr:$src)>;
6234 }
6236 let Predicates = [HasAVX2] in {
6237 defm : SS41I_pmovx_avx2_patterns<"VPMOVSX", X86vsext>;
6238 defm : SS41I_pmovx_avx2_patterns<"VPMOVZX", X86vzext>;
6239 }
6241 // SSE4.1/AVX patterns.
6242 multiclass SS41I_pmovx_patterns<string OpcPrefix, SDNode ExtOp,
6243 PatFrag ExtLoad16> {
6244 def : Pat<(v8i16 (ExtOp (v16i8 VR128:$src))),
6245 (!cast<I>(OpcPrefix#BWrr) VR128:$src)>;
6246 def : Pat<(v4i32 (ExtOp (v16i8 VR128:$src))),
6247 (!cast<I>(OpcPrefix#BDrr) VR128:$src)>;
6248 def : Pat<(v2i64 (ExtOp (v16i8 VR128:$src))),
6249 (!cast<I>(OpcPrefix#BQrr) VR128:$src)>;
6251 def : Pat<(v4i32 (ExtOp (v8i16 VR128:$src))),
6252 (!cast<I>(OpcPrefix#WDrr) VR128:$src)>;
6253 def : Pat<(v2i64 (ExtOp (v8i16 VR128:$src))),
6254 (!cast<I>(OpcPrefix#WQrr) VR128:$src)>;
6256 def : Pat<(v2i64 (ExtOp (v4i32 VR128:$src))),
6257 (!cast<I>(OpcPrefix#DQrr) VR128:$src)>;
6259 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6260 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6261 def : Pat<(v8i16 (ExtOp (bc_v16i8 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6262 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6263 def : Pat<(v8i16 (ExtOp (v16i8 (vzmovl_v2i64 addr:$src)))),
6264 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6265 def : Pat<(v8i16 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6266 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6267 def : Pat<(v8i16 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6268 (!cast<I>(OpcPrefix#BWrm) addr:$src)>;
6270 def : Pat<(v4i32 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6271 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6272 def : Pat<(v4i32 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6273 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6274 def : Pat<(v4i32 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6275 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6276 def : Pat<(v4i32 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6277 (!cast<I>(OpcPrefix#BDrm) addr:$src)>;
6279 def : Pat<(v2i64 (ExtOp (bc_v16i8 (v4i32 (scalar_to_vector (ExtLoad16 addr:$src)))))),
6280 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6281 def : Pat<(v2i64 (ExtOp (v16i8 (vzmovl_v4i32 addr:$src)))),
6282 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6283 def : Pat<(v2i64 (ExtOp (v16i8 (vzload_v2i64 addr:$src)))),
6284 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6285 def : Pat<(v2i64 (ExtOp (bc_v16i8 (loadv2i64 addr:$src)))),
6286 (!cast<I>(OpcPrefix#BQrm) addr:$src)>;
6288 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6289 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6290 def : Pat<(v4i32 (ExtOp (bc_v8i16 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6291 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6292 def : Pat<(v4i32 (ExtOp (v8i16 (vzmovl_v2i64 addr:$src)))),
6293 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6294 def : Pat<(v4i32 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6295 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6296 def : Pat<(v4i32 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6297 (!cast<I>(OpcPrefix#WDrm) addr:$src)>;
6299 def : Pat<(v2i64 (ExtOp (bc_v8i16 (v4i32 (scalar_to_vector (loadi32 addr:$src)))))),
6300 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6301 def : Pat<(v2i64 (ExtOp (v8i16 (vzmovl_v4i32 addr:$src)))),
6302 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6303 def : Pat<(v2i64 (ExtOp (v8i16 (vzload_v2i64 addr:$src)))),
6304 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6305 def : Pat<(v2i64 (ExtOp (bc_v8i16 (loadv2i64 addr:$src)))),
6306 (!cast<I>(OpcPrefix#WQrm) addr:$src)>;
6308 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2i64 (scalar_to_vector (loadi64 addr:$src)))))),
6309 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6310 def : Pat<(v2i64 (ExtOp (bc_v4i32 (v2f64 (scalar_to_vector (loadf64 addr:$src)))))),
6311 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6312 def : Pat<(v2i64 (ExtOp (v4i32 (vzmovl_v2i64 addr:$src)))),
6313 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6314 def : Pat<(v2i64 (ExtOp (v4i32 (vzload_v2i64 addr:$src)))),
6315 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6316 def : Pat<(v2i64 (ExtOp (bc_v4i32 (loadv2i64 addr:$src)))),
6317 (!cast<I>(OpcPrefix#DQrm) addr:$src)>;
6318 }
6320 let Predicates = [HasAVX] in {
6321 defm : SS41I_pmovx_patterns<"VPMOVSX", X86vsext, extloadi32i16>;
6322 defm : SS41I_pmovx_patterns<"VPMOVZX", X86vzext, loadi16_anyext>;
6323 }
6325 let Predicates = [UseSSE41] in {
6326 defm : SS41I_pmovx_patterns<"PMOVSX", X86vsext, extloadi32i16>;
6327 defm : SS41I_pmovx_patterns<"PMOVZX", X86vzext, loadi16_anyext>;
6328 }
6330 //===----------------------------------------------------------------------===//
6331 // SSE4.1 - Extract Instructions
6332 //===----------------------------------------------------------------------===//
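// Informally, e.g. "pextrb $3, %xmm0, %eax" zero-extends byte 3 of xmm0 into
// eax; the memory forms store just the extracted element (1, 2, 4 or 8
// bytes).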
6334 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
6335 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
6336 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6337 (ins VR128:$src1, i32i8imm:$src2),
6338 !strconcat(OpcodeStr,
6339 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6340 [(set GR32orGR64:$dst, (X86pextrb (v16i8 VR128:$src1),
6341 imm:$src2))]>,
6342 Sched<[WriteShuffle]>;
6343 let hasSideEffects = 0, mayStore = 1,
6344 SchedRW = [WriteShuffleLd, WriteRMW] in
6345 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6346 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
6347 !strconcat(OpcodeStr,
6348 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6349 [(store (i8 (trunc (assertzext (X86pextrb (v16i8 VR128:$src1),
6350 imm:$src2)))), addr:$dst)]>;
6351 }
6353 let Predicates = [HasAVX] in
6354 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
6356 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
6359 /// SS41I_extract16 - SSE 4.1 extract 16 bits to a memory destination (the
6359 /// register form is disassembly-only)
6360 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
6361 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
6362 def rr_REV : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6363 (ins VR128:$src1, i32i8imm:$src2),
6364 !strconcat(OpcodeStr,
6365 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6366 []>, Sched<[WriteShuffle]>;
6368 let hasSideEffects = 0, mayStore = 1,
6369 SchedRW = [WriteShuffleLd, WriteRMW] in
6370 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6371 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
6372 !strconcat(OpcodeStr,
6373 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6374 [(store (i16 (trunc (assertzext (X86pextrw (v8i16 VR128:$src1),
6375 imm:$src2)))), addr:$dst)]>;
6376 }
6378 let Predicates = [HasAVX] in
6379 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
6381 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
6384 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
6385 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
6386 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
6387 (ins VR128:$src1, i32i8imm:$src2),
6388 !strconcat(OpcodeStr,
6389 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6390 [(set GR32:$dst,
6391 (extractelt (v4i32 VR128:$src1), imm:$src2))]>,
6392 Sched<[WriteShuffle]>;
6393 let SchedRW = [WriteShuffleLd, WriteRMW] in
6394 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6395 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
6396 !strconcat(OpcodeStr,
6397 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6398 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
6399 addr:$dst)]>;
6400 }
6402 let Predicates = [HasAVX] in
6403 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
6405 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
6407 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
6408 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
6409 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
6410 (ins VR128:$src1, i32i8imm:$src2),
6411 !strconcat(OpcodeStr,
6412 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6413 [(set GR64:$dst,
6414 (extractelt (v2i64 VR128:$src1), imm:$src2))]>,
6415 Sched<[WriteShuffle]>, REX_W;
6416 let SchedRW = [WriteShuffleLd, WriteRMW] in
6417 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6418 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
6419 !strconcat(OpcodeStr,
6420 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6421 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
6422 addr:$dst)]>, REX_W;
6423 }
6425 let Predicates = [HasAVX] in
6426 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
6428 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
6430 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or
6431 /// memory destination
6432 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr,
6433 OpndItins itins = DEFAULT_ITINS> {
6434 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32orGR64:$dst),
6435 (ins VR128:$src1, i32i8imm:$src2),
6436 !strconcat(OpcodeStr,
6437 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6438 [(set GR32orGR64:$dst,
6439 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))],
6440 itins.rr>, Sched<[WriteFBlend]>;
6441 let SchedRW = [WriteFBlendLd, WriteRMW] in
6442 def mr : SS4AIi8<opc, MRMDestMem, (outs),
6443 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
6444 !strconcat(OpcodeStr,
6445 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6446 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
6447 addr:$dst)], itins.rm>;
6448 }
6450 let ExeDomain = SSEPackedSingle in {
6451 let Predicates = [UseAVX] in
6452 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
6453 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps", SSE_EXTRACT_ITINS>;
6454 }
6456 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
6457 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6458 imm:$src2))),
6459 addr:$dst),
6460 (VEXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6461 Requires<[HasAVX]>;
6462 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
6463 imm:$src2))),
6464 addr:$dst),
6465 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
6466 Requires<[UseSSE41]>;
6468 //===----------------------------------------------------------------------===//
6469 // SSE4.1 - Insert Instructions
6470 //===----------------------------------------------------------------------===//
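// Informally, e.g. "pinsrd $2, %eax, %xmm0" replaces element 2 of xmm0 with
// eax and leaves the other elements unchanged; the memory forms load only
// the element being inserted.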
6472 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
6473 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6474 (ins VR128:$src1, GR32orGR64:$src2, i32i8imm:$src3),
6475 !if(Is2Addr,
6476 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6477 !strconcat(asm,
6478 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6479 [(set VR128:$dst,
6480 (X86pinsrb VR128:$src1, GR32orGR64:$src2, imm:$src3))]>,
6481 Sched<[WriteShuffle]>;
6482 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6483 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
6484 !if(Is2Addr,
6485 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6486 !strconcat(asm,
6487 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6488 [(set VR128:$dst,
6489 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
6490 imm:$src3))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6491 }
6493 let Predicates = [HasAVX] in
6494 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
6495 let Constraints = "$src1 = $dst" in
6496 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
6498 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
6499 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6500 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
6501 !if(Is2Addr,
6502 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6503 !strconcat(asm,
6504 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6505 [(set VR128:$dst,
6506 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
6507 Sched<[WriteShuffle]>;
6508 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6509 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
6510 !if(Is2Addr,
6511 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6512 !strconcat(asm,
6513 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6514 [(set VR128:$dst,
6515 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
6516 imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6517 }
6519 let Predicates = [HasAVX] in
6520 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
6521 let Constraints = "$src1 = $dst" in
6522 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
6524 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
6525 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6526 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
6527 !if(Is2Addr,
6528 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6529 !strconcat(asm,
6530 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6531 [(set VR128:$dst,
6532 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
6533 Sched<[WriteShuffle]>;
6534 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6535 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
6536 !if(Is2Addr,
6537 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6538 !strconcat(asm,
6539 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6540 [(set VR128:$dst,
6541 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
6542 imm:$src3)))]>, Sched<[WriteShuffleLd, ReadAfterLd]>;
6543 }
6545 let Predicates = [HasAVX] in
6546 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
6547 let Constraints = "$src1 = $dst" in
6548 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
6550 // insertps has a few different modes. The first two below are optimized
6551 // inserts that won't zero arbitrary elements in the destination vector;
6552 // the form matching the intrinsic can additionally zero arbitrary elements
6553 // in the target vector.
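// For reference, the insertps immediate encodes:
//   imm[7:6] = count_s (source element, register form only)
//   imm[5:4] = count_d (destination element to replace)
//   imm[3:0] = zmask   (destination elements to force to zero)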
6554 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1,
6555 OpndItins itins = DEFAULT_ITINS> {
6556 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
6557 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
6558 !if(Is2Addr,
6559 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6560 !strconcat(asm,
6561 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6562 [(set VR128:$dst,
6563 (X86insertps VR128:$src1, VR128:$src2, imm:$src3))], itins.rr>,
6564 Sched<[WriteFShuffle]>;
6565 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
6566 (ins VR128:$src1, f32mem:$src2, i8imm:$src3),
6567 !if(Is2Addr,
6568 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6569 !strconcat(asm,
6570 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6571 [(set VR128:$dst,
6572 (X86insertps VR128:$src1,
6573 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
6574 imm:$src3))], itins.rm>,
6575 Sched<[WriteFShuffleLd, ReadAfterLd]>;
6576 }
6578 let ExeDomain = SSEPackedSingle in {
6579 let Predicates = [UseAVX] in
6580 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
6581 let Constraints = "$src1 = $dst" in
6582 defm INSERTPS : SS41I_insertf32<0x21, "insertps", 1, SSE_INSERT_ITINS>;
6583 }
6585 let Predicates = [UseSSE41] in {
6586 // If we're inserting an element from a load or a null pshuf of a load,
6587 // fold the load into the insertps instruction.
6588 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd (v4f32
6589 (scalar_to_vector (loadf32 addr:$src2))), (i8 0)),
6590 imm:$src3)),
6591 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6592 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1), (X86PShufd
6593 (loadv4f32 addr:$src2), (i8 0)), imm:$src3)),
6594 (INSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6595 }
6597 let Predicates = [UseAVX] in {
6598 // If we're inserting an element from a vbroadcast of a load, fold the
6599 // load into the X86insertps instruction.
6600 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6601 (X86VBroadcast (loadf32 addr:$src2)), imm:$src3)),
6602 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6603 def : Pat<(v4f32 (X86insertps (v4f32 VR128:$src1),
6604 (X86VBroadcast (loadv4f32 addr:$src2)), imm:$src3)),
6605 (VINSERTPSrm VR128:$src1, addr:$src2, imm:$src3)>;
6606 }
6608 //===----------------------------------------------------------------------===//
6609 // SSE4.1 - Round Instructions
6610 //===----------------------------------------------------------------------===//
6612 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
6613 X86MemOperand x86memop, RegisterClass RC,
6614 PatFrag mem_frag32, PatFrag mem_frag64,
6615 Intrinsic V4F32Int, Intrinsic V2F64Int> {
6616 let ExeDomain = SSEPackedSingle in {
6618 // Vector intrinsic operation, reg
6619 def PSr : SS4AIi8<opcps, MRMSrcReg,
6620 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
6621 !strconcat(OpcodeStr,
6622 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6623 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))],
6624 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6626 // Vector intrinsic operation, mem
6627 def PSm : SS4AIi8<opcps, MRMSrcMem,
6628 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
6629 !strconcat(OpcodeStr,
6630 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6631 [(set RC:$dst,
6632 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))],
6633 IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
6634 } // ExeDomain = SSEPackedSingle
6636 let ExeDomain = SSEPackedDouble in {
6637 // Vector intrinsic operation, reg
6638 def PDr : SS4AIi8<opcpd, MRMSrcReg,
6639 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
6640 !strconcat(OpcodeStr,
6641 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6642 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))],
6643 IIC_SSE_ROUNDPS_REG>, Sched<[WriteFAdd]>;
6645 // Vector intrinsic operation, mem
6646 def PDm : SS4AIi8<opcpd, MRMSrcMem,
6647 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
6648 !strconcat(OpcodeStr,
6649 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6650 [(set RC:$dst,
6651 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))],
6652 IIC_SSE_ROUNDPS_MEM>, Sched<[WriteFAddLd]>;
6653 } // ExeDomain = SSEPackedDouble
6654 }
6656 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
6657 string OpcodeStr,
6658 Intrinsic F32Int,
6659 Intrinsic F64Int, bit Is2Addr = 1> {
6660 let ExeDomain = GenericDomain in {
6661 // Operation, reg.
6662 let hasSideEffects = 0 in
6663 def SSr : SS4AIi8<opcss, MRMSrcReg,
6664 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, i32i8imm:$src3),
6665 !if(Is2Addr,
6666 !strconcat(OpcodeStr,
6667 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6668 !strconcat(OpcodeStr,
6669 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6670 []>, Sched<[WriteFAdd]>;
6672 // Intrinsic operation, reg.
6673 let isCodeGenOnly = 1 in
6674 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
6675 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
6676 !if(Is2Addr,
6677 !strconcat(OpcodeStr,
6678 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6679 !strconcat(OpcodeStr,
6680 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6681 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6682 Sched<[WriteFAdd]>;
6684 // Intrinsic operation, mem.
6685 def SSm : SS4AIi8<opcss, MRMSrcMem,
6686 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
6687 !if(Is2Addr,
6688 !strconcat(OpcodeStr,
6689 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6690 !strconcat(OpcodeStr,
6691 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6692 [(set VR128:$dst,
6693 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
6694 Sched<[WriteFAddLd, ReadAfterLd]>;
6696 // Operation, reg.
6697 let hasSideEffects = 0 in
6698 def SDr : SS4AIi8<opcsd, MRMSrcReg,
6699 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, i32i8imm:$src3),
6700 !if(Is2Addr,
6701 !strconcat(OpcodeStr,
6702 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6703 !strconcat(OpcodeStr,
6704 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6705 []>, Sched<[WriteFAdd]>;
6707 // Intrinsic operation, reg.
6708 let isCodeGenOnly = 1 in
6709 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
6710 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
6711 !if(Is2Addr,
6712 !strconcat(OpcodeStr,
6713 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6714 !strconcat(OpcodeStr,
6715 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6716 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
6717 Sched<[WriteFAdd]>;
6719 // Intrinsic operation, mem.
6720 def SDm : SS4AIi8<opcsd, MRMSrcMem,
6721 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
6722 !if(Is2Addr,
6723 !strconcat(OpcodeStr,
6724 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
6725 !strconcat(OpcodeStr,
6726 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
6727 [(set VR128:$dst,
6728 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
6729 Sched<[WriteFAddLd, ReadAfterLd]>;
6730 } // ExeDomain = GenericDomain
6731 }
6733 // FP round - roundss, roundps, roundsd, roundpd
6734 let Predicates = [HasAVX] in {
6735 // Intrinsic form
6736 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
6737 loadv4f32, loadv2f64,
6738 int_x86_sse41_round_ps,
6739 int_x86_sse41_round_pd>, VEX;
6740 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
6741 loadv8f32, loadv4f64,
6742 int_x86_avx_round_ps_256,
6743 int_x86_avx_round_pd_256>, VEX, VEX_L;
6744 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
6745 int_x86_sse41_round_ss,
6746 int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG;
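// The round immediate encodes the rounding mode in bits [1:0] (00 = nearest,
// 01 = down, 10 = up, 11 = truncate); bit 2 set means "use MXCSR.RC" instead
// of bits [1:0], and bit 3 set suppresses the precision (inexact) exception.
// Hence the encodings in the patterns below: 0x1 = floor, 0x2 = ceil,
// 0x3 = trunc, 0x4 = rint (current mode, inexact reported) and
// 0xC = nearbyint (current mode, inexact suppressed).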
6748 def : Pat<(ffloor FR32:$src),
6749 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6750 def : Pat<(f64 (ffloor FR64:$src)),
6751 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6752 def : Pat<(f32 (fnearbyint FR32:$src)),
6753 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6754 def : Pat<(f64 (fnearbyint FR64:$src)),
6755 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6756 def : Pat<(f32 (fceil FR32:$src)),
6757 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6758 def : Pat<(f64 (fceil FR64:$src)),
6759 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6760 def : Pat<(f32 (frint FR32:$src)),
6761 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6762 def : Pat<(f64 (frint FR64:$src)),
6763 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6764 def : Pat<(f32 (ftrunc FR32:$src)),
6765 (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6766 def : Pat<(f64 (ftrunc FR64:$src)),
6767 (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6769 def : Pat<(v4f32 (ffloor VR128:$src)),
6770 (VROUNDPSr VR128:$src, (i32 0x1))>;
6771 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6772 (VROUNDPSr VR128:$src, (i32 0xC))>;
6773 def : Pat<(v4f32 (fceil VR128:$src)),
6774 (VROUNDPSr VR128:$src, (i32 0x2))>;
6775 def : Pat<(v4f32 (frint VR128:$src)),
6776 (VROUNDPSr VR128:$src, (i32 0x4))>;
6777 def : Pat<(v4f32 (ftrunc VR128:$src)),
6778 (VROUNDPSr VR128:$src, (i32 0x3))>;
6780 def : Pat<(v2f64 (ffloor VR128:$src)),
6781 (VROUNDPDr VR128:$src, (i32 0x1))>;
6782 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6783 (VROUNDPDr VR128:$src, (i32 0xC))>;
6784 def : Pat<(v2f64 (fceil VR128:$src)),
6785 (VROUNDPDr VR128:$src, (i32 0x2))>;
6786 def : Pat<(v2f64 (frint VR128:$src)),
6787 (VROUNDPDr VR128:$src, (i32 0x4))>;
6788 def : Pat<(v2f64 (ftrunc VR128:$src)),
6789 (VROUNDPDr VR128:$src, (i32 0x3))>;
6791 def : Pat<(v8f32 (ffloor VR256:$src)),
6792 (VROUNDYPSr VR256:$src, (i32 0x1))>;
6793 def : Pat<(v8f32 (fnearbyint VR256:$src)),
6794 (VROUNDYPSr VR256:$src, (i32 0xC))>;
6795 def : Pat<(v8f32 (fceil VR256:$src)),
6796 (VROUNDYPSr VR256:$src, (i32 0x2))>;
6797 def : Pat<(v8f32 (frint VR256:$src)),
6798 (VROUNDYPSr VR256:$src, (i32 0x4))>;
6799 def : Pat<(v8f32 (ftrunc VR256:$src)),
6800 (VROUNDYPSr VR256:$src, (i32 0x3))>;
6802 def : Pat<(v4f64 (ffloor VR256:$src)),
6803 (VROUNDYPDr VR256:$src, (i32 0x1))>;
6804 def : Pat<(v4f64 (fnearbyint VR256:$src)),
6805 (VROUNDYPDr VR256:$src, (i32 0xC))>;
6806 def : Pat<(v4f64 (fceil VR256:$src)),
6807 (VROUNDYPDr VR256:$src, (i32 0x2))>;
6808 def : Pat<(v4f64 (frint VR256:$src)),
6809 (VROUNDYPDr VR256:$src, (i32 0x4))>;
6810 def : Pat<(v4f64 (ftrunc VR256:$src)),
6811 (VROUNDYPDr VR256:$src, (i32 0x3))>;
6812 }
6814 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
6815 memopv4f32, memopv2f64,
6816 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
6817 let Constraints = "$src1 = $dst" in
6818 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
6819 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
6821 let Predicates = [UseSSE41] in {
6822 def : Pat<(ffloor FR32:$src),
6823 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>;
6824 def : Pat<(f64 (ffloor FR64:$src)),
6825 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>;
6826 def : Pat<(f32 (fnearbyint FR32:$src)),
6827 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>;
6828 def : Pat<(f64 (fnearbyint FR64:$src)),
6829 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>;
6830 def : Pat<(f32 (fceil FR32:$src)),
6831 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>;
6832 def : Pat<(f64 (fceil FR64:$src)),
6833 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>;
6834 def : Pat<(f32 (frint FR32:$src)),
6835 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>;
6836 def : Pat<(f64 (frint FR64:$src)),
6837 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>;
6838 def : Pat<(f32 (ftrunc FR32:$src)),
6839 (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>;
6840 def : Pat<(f64 (ftrunc FR64:$src)),
6841 (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>;
6843 def : Pat<(v4f32 (ffloor VR128:$src)),
6844 (ROUNDPSr VR128:$src, (i32 0x1))>;
6845 def : Pat<(v4f32 (fnearbyint VR128:$src)),
6846 (ROUNDPSr VR128:$src, (i32 0xC))>;
6847 def : Pat<(v4f32 (fceil VR128:$src)),
6848 (ROUNDPSr VR128:$src, (i32 0x2))>;
6849 def : Pat<(v4f32 (frint VR128:$src)),
6850 (ROUNDPSr VR128:$src, (i32 0x4))>;
6851 def : Pat<(v4f32 (ftrunc VR128:$src)),
6852 (ROUNDPSr VR128:$src, (i32 0x3))>;
6854 def : Pat<(v2f64 (ffloor VR128:$src)),
6855 (ROUNDPDr VR128:$src, (i32 0x1))>;
6856 def : Pat<(v2f64 (fnearbyint VR128:$src)),
6857 (ROUNDPDr VR128:$src, (i32 0xC))>;
6858 def : Pat<(v2f64 (fceil VR128:$src)),
6859 (ROUNDPDr VR128:$src, (i32 0x2))>;
6860 def : Pat<(v2f64 (frint VR128:$src)),
6861 (ROUNDPDr VR128:$src, (i32 0x4))>;
6862 def : Pat<(v2f64 (ftrunc VR128:$src)),
6863 (ROUNDPDr VR128:$src, (i32 0x3))>;
6864 }
6866 //===----------------------------------------------------------------------===//
6867 // SSE4.1 - Packed Bit Test
6868 //===----------------------------------------------------------------------===//
6870 // ptest instruction: we lower to this in X86ISelLowering, primarily from the
6871 // Intel intrinsic that corresponds to it.
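// ptest sets ZF if (src1 AND src2) is all zeroes and CF if (NOT src1 AND
// src2) is all zeroes, so the testz/testc/testnzc intrinsics each reduce to
// a single flag read after the compare.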
6872 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6873 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6874 "vptest\t{$src2, $src1|$src1, $src2}",
6875 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6876 Sched<[WriteVecLogic]>, VEX;
6877 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6878 "vptest\t{$src2, $src1|$src1, $src2}",
6879 [(set EFLAGS,(X86ptest VR128:$src1, (loadv2i64 addr:$src2)))]>,
6880 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6882 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
6883 "vptest\t{$src2, $src1|$src1, $src2}",
6884 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
6885 Sched<[WriteVecLogic]>, VEX, VEX_L;
6886 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
6887 "vptest\t{$src2, $src1|$src1, $src2}",
6888 [(set EFLAGS,(X86ptest VR256:$src1, (loadv4i64 addr:$src2)))]>,
6889 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX, VEX_L;
6890 }
6892 let Defs = [EFLAGS] in {
6893 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
6894 "ptest\t{$src2, $src1|$src1, $src2}",
6895 [(set EFLAGS, (X86ptest VR128:$src1, (v2i64 VR128:$src2)))]>,
6896 Sched<[WriteVecLogic]>;
6897 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
6898 "ptest\t{$src2, $src1|$src1, $src2}",
6899 [(set EFLAGS, (X86ptest VR128:$src1, (memopv2i64 addr:$src2)))]>,
6900 Sched<[WriteVecLogicLd, ReadAfterLd]>;
6901 }
6903 // The bit test instructions below are AVX-only.
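// Unlike ptest, vtestps/vtestpd examine only the sign bits: ZF is set if no
// sign bit is set in (src1 AND src2), and CF is set if no sign bit is set in
// (NOT src1 AND src2).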
6904 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
6905 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
6906 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
6907 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6908 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>,
6909 Sched<[WriteVecLogic]>, VEX;
6910 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
6911 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
6912 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
6913 Sched<[WriteVecLogicLd, ReadAfterLd]>, VEX;
6914 }
6916 let Defs = [EFLAGS], Predicates = [HasAVX] in {
6917 let ExeDomain = SSEPackedSingle in {
6918 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, loadv4f32, v4f32>;
6919 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, loadv8f32, v8f32>,
6920 VEX_L;
6921 }
6922 let ExeDomain = SSEPackedDouble in {
6923 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, loadv2f64, v2f64>;
6924 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, loadv4f64, v4f64>,
6925 VEX_L;
6926 }
6927 }
6929 //===----------------------------------------------------------------------===//
6930 // SSE4.1 - Misc Instructions
6931 //===----------------------------------------------------------------------===//
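// popcnt also writes EFLAGS: ZF is set when the source is zero and
// OF/SF/AF/CF/PF are cleared, hence the implicit EFLAGS defs below.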
6933 let Defs = [EFLAGS], Predicates = [HasPOPCNT] in {
6934 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
6935 "popcnt{w}\t{$src, $dst|$dst, $src}",
6936 [(set GR16:$dst, (ctpop GR16:$src)), (implicit EFLAGS)],
6937 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
6938 OpSize16, XS;
6939 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
6940 "popcnt{w}\t{$src, $dst|$dst, $src}",
6941 [(set GR16:$dst, (ctpop (loadi16 addr:$src))),
6942 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6943 Sched<[WriteFAddLd]>, OpSize16, XS;
6945 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
6946 "popcnt{l}\t{$src, $dst|$dst, $src}",
6947 [(set GR32:$dst, (ctpop GR32:$src)), (implicit EFLAGS)],
6948 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>,
6949 OpSize32, XS;
6951 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
6952 "popcnt{l}\t{$src, $dst|$dst, $src}",
6953 [(set GR32:$dst, (ctpop (loadi32 addr:$src))),
6954 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6955 Sched<[WriteFAddLd]>, OpSize32, XS;
6957 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
6958 "popcnt{q}\t{$src, $dst|$dst, $src}",
6959 [(set GR64:$dst, (ctpop GR64:$src)), (implicit EFLAGS)],
6960 IIC_SSE_POPCNT_RR>, Sched<[WriteFAdd]>, XS;
6961 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
6962 "popcnt{q}\t{$src, $dst|$dst, $src}",
6963 [(set GR64:$dst, (ctpop (loadi64 addr:$src))),
6964 (implicit EFLAGS)], IIC_SSE_POPCNT_RM>,
6965 Sched<[WriteFAddLd]>, XS;
6966 }
6970 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
6971 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
6972 Intrinsic IntId128,
6973 X86FoldableSchedWrite Sched> {
6974 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
6975 (ins VR128:$src),
6976 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6977 [(set VR128:$dst, (IntId128 VR128:$src))]>,
6978 Sched<[Sched]>;
6979 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
6980 (ins i128mem:$src),
6981 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
6982 [(set VR128:$dst,
6983 (IntId128 (bitconvert (memopv2i64 addr:$src))))]>,
6984 Sched<[Sched.Folded]>;
6985 }
6987 // PHMIN has the same profile as PSAD, thus we use the same scheduling
6988 // model, although the naming is misleading.
6989 let Predicates = [HasAVX] in
6990 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
6991 int_x86_sse41_phminposuw,
6992 WriteVecIMul>, VEX;
6993 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
6994 int_x86_sse41_phminposuw,
6995 WriteVecIMul>;
6997 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
6998 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
6999 Intrinsic IntId128, bit Is2Addr = 1,
7000 OpndItins itins = DEFAULT_ITINS> {
7001 let isCommutable = 1 in
7002 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
7003 (ins VR128:$src1, VR128:$src2),
7004 !if(Is2Addr,
7005 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7006 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7007 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))],
7008 itins.rr>, Sched<[itins.Sched]>;
7009 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
7010 (ins VR128:$src1, i128mem:$src2),
7011 !if(Is2Addr,
7012 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7013 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7014 [(set VR128:$dst,
7015 (IntId128 VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))],
7016 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
7017 }
7019 /// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator
7020 multiclass SS41I_binop_rm_int_y<bits<8> opc, string OpcodeStr,
7021 Intrinsic IntId256,
7022 X86FoldableSchedWrite Sched> {
7023 let isCommutable = 1 in
7024 def Yrr : SS48I<opc, MRMSrcReg, (outs VR256:$dst),
7025 (ins VR256:$src1, VR256:$src2),
7026 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7027 [(set VR256:$dst, (IntId256 VR256:$src1, VR256:$src2))]>,
7028 Sched<[Sched]>;
7029 def Yrm : SS48I<opc, MRMSrcMem, (outs VR256:$dst),
7030 (ins VR256:$src1, i256mem:$src2),
7031 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
7032 [(set VR256:$dst,
7033 (IntId256 VR256:$src1, (bitconvert (loadv4i64 addr:$src2))))]>,
7034 Sched<[Sched.Folded, ReadAfterLd]>;
7035 }
7038 /// SS48I_binop_rm - Simple SSE41 binary operator.
7039 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
7040 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
7041 X86MemOperand x86memop, bit Is2Addr = 1,
7042 OpndItins itins = SSE_INTALU_ITINS_P> {
7043 let isCommutable = 1 in
7044 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
7045 (ins RC:$src1, RC:$src2),
7046 !if(Is2Addr,
7047 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7048 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7049 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>,
7050 Sched<[itins.Sched]>;
7051 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
7052 (ins RC:$src1, x86memop:$src2),
7053 !if(Is2Addr,
7054 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7055 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7056 [(set RC:$dst,
7057 (OpVT (OpNode RC:$src1, (bitconvert (memop_frag addr:$src2)))))]>,
7058 Sched<[itins.Sched.Folded, ReadAfterLd]>;
7059 }
7061 /// SS48I_binop_rm2 - Simple SSE41 binary operator with different src and dst
7062 /// types.
7063 multiclass SS48I_binop_rm2<bits<8> opc, string OpcodeStr, SDNode OpNode,
7064 ValueType DstVT, ValueType SrcVT, RegisterClass RC,
7065 PatFrag memop_frag, X86MemOperand x86memop,
7066 OpndItins itins,
7067 bit IsCommutable = 0, bit Is2Addr = 1> {
7068 let isCommutable = IsCommutable in
7069 def rr : SS48I<opc, MRMSrcReg, (outs RC:$dst),
7070 (ins RC:$src1, RC:$src2),
7071 !if(Is2Addr,
7072 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7073 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7074 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1), RC:$src2)))]>,
7075 Sched<[itins.Sched]>;
7076 def rm : SS48I<opc, MRMSrcMem, (outs RC:$dst),
7077 (ins RC:$src1, x86memop:$src2),
7078 !if(Is2Addr,
7079 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7080 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7081 [(set RC:$dst, (DstVT (OpNode (SrcVT RC:$src1),
7082 (bitconvert (memop_frag addr:$src2)))))]>,
7083 Sched<[itins.Sched.Folded, ReadAfterLd]>;
7084 }
7086 let Predicates = [HasAVX] in {
7087 let isCommutable = 0 in
7088 defm VPMINSB : SS48I_binop_rm<0x38, "vpminsb", X86smin, v16i8, VR128,
7089 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7090 VEX_4V;
7091 defm VPMINSD : SS48I_binop_rm<0x39, "vpminsd", X86smin, v4i32, VR128,
7092 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7093 VEX_4V;
7094 defm VPMINUD : SS48I_binop_rm<0x3B, "vpminud", X86umin, v4i32, VR128,
7095 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7096 VEX_4V;
7097 defm VPMINUW : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v8i16, VR128,
7098 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7099 VEX_4V;
7100 defm VPMAXSB : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v16i8, VR128,
7101 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7102 VEX_4V;
7103 defm VPMAXSD : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v4i32, VR128,
7104 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7105 VEX_4V;
7106 defm VPMAXUD : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v4i32, VR128,
7107 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7108 VEX_4V;
7109 defm VPMAXUW : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v8i16, VR128,
7110 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7111 VEX_4V;
7112 defm VPMULDQ : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v2i64, v4i32,
7113 VR128, loadv2i64, i128mem,
7114 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V;
7115 }
7117 let Predicates = [HasAVX2] in {
7118 let isCommutable = 0 in
7119 defm VPMINSBY : SS48I_binop_rm<0x38, "vpminsb", X86smin, v32i8, VR256,
7120 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7121 VEX_4V, VEX_L;
7122 defm VPMINSDY : SS48I_binop_rm<0x39, "vpminsd", X86smin, v8i32, VR256,
7123 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7124 VEX_4V, VEX_L;
7125 defm VPMINUDY : SS48I_binop_rm<0x3B, "vpminud", X86umin, v8i32, VR256,
7126 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7127 VEX_4V, VEX_L;
7128 defm VPMINUWY : SS48I_binop_rm<0x3A, "vpminuw", X86umin, v16i16, VR256,
7129 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7130 VEX_4V, VEX_L;
7131 defm VPMAXSBY : SS48I_binop_rm<0x3C, "vpmaxsb", X86smax, v32i8, VR256,
7132 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7133 VEX_4V, VEX_L;
7134 defm VPMAXSDY : SS48I_binop_rm<0x3D, "vpmaxsd", X86smax, v8i32, VR256,
7135 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7136 VEX_4V, VEX_L;
7137 defm VPMAXUDY : SS48I_binop_rm<0x3F, "vpmaxud", X86umax, v8i32, VR256,
7138 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7139 VEX_4V, VEX_L;
7140 defm VPMAXUWY : SS48I_binop_rm<0x3E, "vpmaxuw", X86umax, v16i16, VR256,
7141 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7142 VEX_4V, VEX_L;
7143 defm VPMULDQY : SS48I_binop_rm2<0x28, "vpmuldq", X86pmuldq, v4i64, v8i32,
7144 VR256, loadv4i64, i256mem,
7145 SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L;
7146 }
7148 let Constraints = "$src1 = $dst" in {
7149 let isCommutable = 0 in
7150 defm PMINSB : SS48I_binop_rm<0x38, "pminsb", X86smin, v16i8, VR128,
7151 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7152 defm PMINSD : SS48I_binop_rm<0x39, "pminsd", X86smin, v4i32, VR128,
7153 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7154 defm PMINUD : SS48I_binop_rm<0x3B, "pminud", X86umin, v4i32, VR128,
7155 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7156 defm PMINUW : SS48I_binop_rm<0x3A, "pminuw", X86umin, v8i16, VR128,
7157 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7158 defm PMAXSB : SS48I_binop_rm<0x3C, "pmaxsb", X86smax, v16i8, VR128,
7159 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7160 defm PMAXSD : SS48I_binop_rm<0x3D, "pmaxsd", X86smax, v4i32, VR128,
7161 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7162 defm PMAXUD : SS48I_binop_rm<0x3F, "pmaxud", X86umax, v4i32, VR128,
7163 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7164 defm PMAXUW : SS48I_binop_rm<0x3E, "pmaxuw", X86umax, v8i16, VR128,
7165 memopv2i64, i128mem, 1, SSE_INTALU_ITINS_P>;
7166 defm PMULDQ : SS48I_binop_rm2<0x28, "pmuldq", X86pmuldq, v2i64, v4i32,
7167 VR128, memopv2i64, i128mem,
7168 SSE_INTMUL_ITINS_P, 1>;
7169 }
7171 let Predicates = [HasAVX] in {
7172 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, VR128,
7173 loadv2i64, i128mem, 0, SSE_PMULLD_ITINS>,
7174 VEX_4V;
7175 defm VPCMPEQQ : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v2i64, VR128,
7176 loadv2i64, i128mem, 0, SSE_INTALU_ITINS_P>,
7177 VEX_4V;
7178 }
7179 let Predicates = [HasAVX2] in {
7180 defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256,
7181 loadv4i64, i256mem, 0, SSE_PMULLD_ITINS>,
7182 VEX_4V, VEX_L;
7183 defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256,
7184 loadv4i64, i256mem, 0, SSE_INTALU_ITINS_P>,
7185 VEX_4V, VEX_L;
7186 }
7188 let Constraints = "$src1 = $dst" in {
7189 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, VR128,
7190 memopv2i64, i128mem, 1, SSE_PMULLD_ITINS>;
7191 defm PCMPEQQ : SS48I_binop_rm<0x29, "pcmpeqq", X86pcmpeq, v2i64, VR128,
7192 memopv2i64, i128mem, 1, SSE_INTALUQ_ITINS_P>;
7193 }
7195 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
7196 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
7197 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
7198 X86MemOperand x86memop, bit Is2Addr = 1,
7199 OpndItins itins = DEFAULT_ITINS> {
7200 let isCommutable = 1 in
7201 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
7202 (ins RC:$src1, RC:$src2, i8imm:$src3),
7203 !if(Is2Addr,
7204 !strconcat(OpcodeStr,
7205 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
7206 !strconcat(OpcodeStr,
7207 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
7208 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))], itins.rr>,
7209 Sched<[itins.Sched]>;
7210 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
7211 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
7212 !if(Is2Addr,
7213 !strconcat(OpcodeStr,
7214 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
7215 !strconcat(OpcodeStr,
7216 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
7217 [(set RC:$dst,
7218 (IntId RC:$src1,
7219 (bitconvert (memop_frag addr:$src2)), imm:$src3))], itins.rm>,
7220 Sched<[itins.Sched.Folded, ReadAfterLd]>;
7221 }
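// For the blend instructions defined below, the immediate is a per-element
// selection mask: bit i set takes element i from the second source, clear
// keeps the first. For the dp instructions, the high nibble selects which
// input elements enter the dot product and the low nibble selects which
// result elements receive the sum (the rest are zeroed).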
7223 let Predicates = [HasAVX] in {
7224 let isCommutable = 0 in {
7225 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
7226 VR128, loadv2i64, i128mem, 0,
7227 DEFAULT_ITINS_MPSADSCHED>, VEX_4V;
7228 }
7230 let ExeDomain = SSEPackedSingle in {
7231 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
7232 VR128, loadv4f32, f128mem, 0,
7233 DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
7234 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
7235 int_x86_avx_blend_ps_256, VR256, loadv8f32,
7236 f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
7237 VEX_4V, VEX_L;
7238 }
7239 let ExeDomain = SSEPackedDouble in {
7240 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
7241 VR128, loadv2f64, f128mem, 0,
7242 DEFAULT_ITINS_FBLENDSCHED>, VEX_4V;
7243 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
7244 int_x86_avx_blend_pd_256,VR256, loadv4f64,
7245 f256mem, 0, DEFAULT_ITINS_FBLENDSCHED>,
7246 VEX_4V, VEX_L;
7247 }
7248 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
7249 VR128, loadv2i64, i128mem, 0,
7250 DEFAULT_ITINS_BLENDSCHED>, VEX_4V;
7252 let ExeDomain = SSEPackedSingle in
7253 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
7254 VR128, loadv4f32, f128mem, 0,
7255 SSE_DPPS_ITINS>, VEX_4V;
7256 let ExeDomain = SSEPackedDouble in
7257 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
7258 VR128, loadv2f64, f128mem, 0,
7259 SSE_DPPS_ITINS>, VEX_4V;
7260 let ExeDomain = SSEPackedSingle in
7261 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
7262 VR256, loadv8f32, i256mem, 0,
7263 SSE_DPPS_ITINS>, VEX_4V, VEX_L;
7264 }
7266 let Predicates = [HasAVX2] in {
7267 let isCommutable = 0 in {
7268 defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw,
7269 VR256, loadv4i64, i256mem, 0,
7270 DEFAULT_ITINS_BLENDSCHED>, VEX_4V, VEX_L;
7271 defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw,
7272 VR256, loadv4i64, i256mem, 0,
7273 DEFAULT_ITINS_MPSADSCHED>, VEX_4V, VEX_L;
7274 }
7275 }
7277 let Constraints = "$src1 = $dst" in {
7278 let isCommutable = 0 in {
7279 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
7280 VR128, memopv2i64, i128mem,
7281 1, SSE_MPSADBW_ITINS>;
7282 }
7283 let ExeDomain = SSEPackedSingle in
7284 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
7285 VR128, memopv4f32, f128mem,
7286 1, SSE_INTALU_ITINS_FBLEND_P>;
7287 let ExeDomain = SSEPackedDouble in
7288 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
7289 VR128, memopv2f64, f128mem,
7290 1, SSE_INTALU_ITINS_FBLEND_P>;
7291 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
7292 VR128, memopv2i64, i128mem,
7293 1, SSE_INTALU_ITINS_BLEND_P>;
7294 let ExeDomain = SSEPackedSingle in
7295 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
7296 VR128, memopv4f32, f128mem, 1,
7297 SSE_DPPS_ITINS>;
7298 let ExeDomain = SSEPackedDouble in
7299 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
7300 VR128, memopv2f64, f128mem, 1,
7301 SSE_DPPD_ITINS>;
7302 }
7304 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
7305 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
7306 RegisterClass RC, X86MemOperand x86memop,
7307 PatFrag mem_frag, Intrinsic IntId,
7308 X86FoldableSchedWrite Sched> {
7309 def rr : Ii8<opc, MRMSrcReg, (outs RC:$dst),
7310 (ins RC:$src1, RC:$src2, RC:$src3),
7311 !strconcat(OpcodeStr,
7312 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7313 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
7314 NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
7315 Sched<[Sched]>;
7317 def rm : Ii8<opc, MRMSrcMem, (outs RC:$dst),
7318 (ins RC:$src1, x86memop:$src2, RC:$src3),
7319 !strconcat(OpcodeStr,
7320 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
7321 [(set RC:$dst,
7322 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
7323 RC:$src3))],
7324 NoItinerary, SSEPackedInt>, TAPD, VEX_4V, VEX_I8IMM,
7325 Sched<[Sched.Folded, ReadAfterLd]>;
7326 }
7328 let Predicates = [HasAVX] in {
7329 let ExeDomain = SSEPackedDouble in {
7330 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem,
7331 loadv2f64, int_x86_sse41_blendvpd,
7332 WriteFVarBlend>;
7333 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem,
7334 loadv4f64, int_x86_avx_blendv_pd_256,
7335 WriteFVarBlend>, VEX_L;
7336 } // ExeDomain = SSEPackedDouble
7337 let ExeDomain = SSEPackedSingle in {
7338 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem,
7339 loadv4f32, int_x86_sse41_blendvps,
7340 WriteFVarBlend>;
7341 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem,
7342 loadv8f32, int_x86_avx_blendv_ps_256,
7343 WriteFVarBlend>, VEX_L;
7344 } // ExeDomain = SSEPackedSingle
7345 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
7346 loadv2i64, int_x86_sse41_pblendvb,
7347 WriteVarBlend>;
7348 }
7350 let Predicates = [HasAVX2] in {
7351 defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem,
7352 loadv4i64, int_x86_avx2_pblendvb,
7353 WriteVarBlend>, VEX_L;
7354 }
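// Note the operand order in the vselect patterns below: vselect (mask, t, f)
// becomes blendv f, t, mask, since blendv takes its first source where the
// sign bit of the mask element is clear and its second source where it is
// set.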
7356 let Predicates = [HasAVX] in {
7357 def : Pat<(v16i8 (vselect (v16i8 VR128:$mask), (v16i8 VR128:$src1),
7358 (v16i8 VR128:$src2))),
7359 (VPBLENDVBrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7360 def : Pat<(v4i32 (vselect (v4i32 VR128:$mask), (v4i32 VR128:$src1),
7361 (v4i32 VR128:$src2))),
7362 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7363 def : Pat<(v4f32 (vselect (v4i32 VR128:$mask), (v4f32 VR128:$src1),
7364 (v4f32 VR128:$src2))),
7365 (VBLENDVPSrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7366 def : Pat<(v2i64 (vselect (v2i64 VR128:$mask), (v2i64 VR128:$src1),
7367 (v2i64 VR128:$src2))),
7368 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7369 def : Pat<(v2f64 (vselect (v2i64 VR128:$mask), (v2f64 VR128:$src1),
7370 (v2f64 VR128:$src2))),
7371 (VBLENDVPDrr VR128:$src2, VR128:$src1, VR128:$mask)>;
7372 def : Pat<(v8i32 (vselect (v8i32 VR256:$mask), (v8i32 VR256:$src1),
7373 (v8i32 VR256:$src2))),
7374 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7375 def : Pat<(v8f32 (vselect (v8i32 VR256:$mask), (v8f32 VR256:$src1),
7376 (v8f32 VR256:$src2))),
7377 (VBLENDVPSYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7378 def : Pat<(v4i64 (vselect (v4i64 VR256:$mask), (v4i64 VR256:$src1),
7379 (v4i64 VR256:$src2))),
7380 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7381 def : Pat<(v4f64 (vselect (v4i64 VR256:$mask), (v4f64 VR256:$src1),
7382 (v4f64 VR256:$src2))),
7383 (VBLENDVPDYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7385 def : Pat<(v8f32 (X86Blendi (v8f32 VR256:$src1), (v8f32 VR256:$src2),
7386 (imm:$mask))),
7387 (VBLENDPSYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7388 def : Pat<(v4f64 (X86Blendi (v4f64 VR256:$src1), (v4f64 VR256:$src2),
7389 (imm:$mask))),
7390 (VBLENDPDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7392 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
7393 (imm:$mask))),
7394 (VPBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
7395 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
7396 (imm:$mask))),
7397 (VBLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
7398 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
7399 (imm:$mask))),
7400 (VBLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
7401 }
7403 let Predicates = [HasAVX2] in {
7404 def : Pat<(v32i8 (vselect (v32i8 VR256:$mask), (v32i8 VR256:$src1),
7405 (v32i8 VR256:$src2))),
7406 (VPBLENDVBYrr VR256:$src2, VR256:$src1, VR256:$mask)>;
7407 def : Pat<(v16i16 (X86Blendi (v16i16 VR256:$src1), (v16i16 VR256:$src2),
7408 (imm:$mask))),
7409 (VPBLENDWYrri VR256:$src1, VR256:$src2, imm:$mask)>;
7410 }
7412 // Patterns for X86vzmovl (keep the low element, zero the rest)
7413 let Predicates = [UseAVX] in {
7414 let AddedComplexity = 15 in {
7415 // Move a scalar to XMM zero-extended: zero a VR128, then do a
7416 // MOVS{S,D} to the lower bits.
7417 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
7418 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
7419 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
7420 (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
7421 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
7422 (VPBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
7423 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
7424 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
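// Note the blend immediates above: BLENDPS with imm 1 takes only dword 0 from
// the source, and PBLENDW needs imm 3 to cover the two words forming that
// dword; every other lane comes from the zero vector.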
7426 // Move low f32 and clear high bits.
7427 def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
7428 (VBLENDPSYrri (v8f32 (AVX_SET0)), VR256:$src, (i8 1))>;
7429 def : Pat<(v8i32 (X86vzmovl (v8i32 VR256:$src))),
7430 (VBLENDPSYrri (v8i32 (AVX_SET0)), VR256:$src, (i8 1))>;
7431 }
7433 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
7434 (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))),
7435 (SUBREG_TO_REG (i32 0),
7436 (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)),
7437 sub_xmm)>;
7438 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
7439 (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))),
7440 (SUBREG_TO_REG (i64 0),
7441 (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)),
7442 sub_xmm)>;
7444 // Move low f64 and clear high bits.
7445 def : Pat<(v4f64 (X86vzmovl (v4f64 VR256:$src))),
7446 (VBLENDPDYrri (v4f64 (AVX_SET0)), VR256:$src, (i8 1))>;
7448 def : Pat<(v4i64 (X86vzmovl (v4i64 VR256:$src))),
7449 (VBLENDPDYrri (v4i64 (AVX_SET0)), VR256:$src, (i8 1))>;
7450 }
7452 let Predicates = [UseSSE41] in {
7453 // With SSE41 we can use blends for these patterns.
7454 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
7455 (BLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
7456 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
7457 (PBLENDWrri (v4i32 (V_SET0)), VR128:$src, (i8 3))>;
7458 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
7459 (BLENDPDrri (v2f64 (V_SET0)), VR128:$src, (i8 1))>;
7460 }
7463 /// SS41I_ternary_int - SSE 4.1 ternary operator
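// The non-VEX blendv forms read their selection mask from the implicit XMM0
// register, hence Uses = [XMM0] and the rr0/rm0 opcode suffixes.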
7464 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
7465 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
7466 X86MemOperand x86memop, Intrinsic IntId,
7467 OpndItins itins = DEFAULT_ITINS> {
7468 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
7469 (ins VR128:$src1, VR128:$src2),
7470 !strconcat(OpcodeStr,
7471 "\t{$src2, $dst|$dst, $src2}"),
7472 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))],
7473 itins.rr>, Sched<[itins.Sched]>;
7475 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
7476 (ins VR128:$src1, x86memop:$src2),
7477 !strconcat(OpcodeStr,
7478 "\t{$src2, $dst|$dst, $src2}"),
7479 [(set VR128:$dst,
7480 (IntId VR128:$src1,
7481 (bitconvert (mem_frag addr:$src2)), XMM0))],
7482 itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
7483 }
7484 }
7486 let ExeDomain = SSEPackedDouble in
7487 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", memopv2f64, f128mem,
7488 int_x86_sse41_blendvpd,
7489 DEFAULT_ITINS_FBLENDSCHED>;
7490 let ExeDomain = SSEPackedSingle in
7491 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", memopv4f32, f128mem,
7492 int_x86_sse41_blendvps,
7493 DEFAULT_ITINS_FBLENDSCHED>;
7494 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", memopv2i64, i128mem,
7495 int_x86_sse41_pblendvb,
7496 DEFAULT_ITINS_VARBLENDSCHED>;
7498 // Aliases with the implicit xmm0 argument
7499 def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7500 (BLENDVPDrr0 VR128:$dst, VR128:$src2)>;
7501 def : InstAlias<"blendvpd\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7502 (BLENDVPDrm0 VR128:$dst, f128mem:$src2)>;
7503 def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7504 (BLENDVPSrr0 VR128:$dst, VR128:$src2)>;
7505 def : InstAlias<"blendvps\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7506 (BLENDVPSrm0 VR128:$dst, f128mem:$src2)>;
7507 def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7508 (PBLENDVBrr0 VR128:$dst, VR128:$src2)>;
7509 def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7510 (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>;
7512 let Predicates = [UseSSE41] in {
7513 def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1),
7514 (v16i8 VR128:$src2))),
7515 (PBLENDVBrr0 VR128:$src2, VR128:$src1)>;
7516 def : Pat<(v4i32 (vselect (v4i32 XMM0), (v4i32 VR128:$src1),
7517 (v4i32 VR128:$src2))),
7518 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
7519 def : Pat<(v4f32 (vselect (v4i32 XMM0), (v4f32 VR128:$src1),
7520 (v4f32 VR128:$src2))),
7521 (BLENDVPSrr0 VR128:$src2, VR128:$src1)>;
7522 def : Pat<(v2i64 (vselect (v2i64 XMM0), (v2i64 VR128:$src1),
7523 (v2i64 VR128:$src2))),
7524 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
7525 def : Pat<(v2f64 (vselect (v2i64 XMM0), (v2f64 VR128:$src1),
7526 (v2f64 VR128:$src2))),
7527 (BLENDVPDrr0 VR128:$src2, VR128:$src1)>;
7529 def : Pat<(v8i16 (X86Blendi (v8i16 VR128:$src1), (v8i16 VR128:$src2),
7530 (imm:$mask))),
7531 (PBLENDWrri VR128:$src1, VR128:$src2, imm:$mask)>;
7532 def : Pat<(v4f32 (X86Blendi (v4f32 VR128:$src1), (v4f32 VR128:$src2),
7533 (imm:$mask))),
7534 (BLENDPSrri VR128:$src1, VR128:$src2, imm:$mask)>;
7535 def : Pat<(v2f64 (X86Blendi (v2f64 VR128:$src1), (v2f64 VR128:$src2),
7536 (imm:$mask))),
7537 (BLENDPDrri VR128:$src1, VR128:$src2, imm:$mask)>;
7539 }
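// movntdqa is a non-temporal (streaming) load hint: on write-combining
// memory it may read through the streaming load buffers instead of
// polluting the cache.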
7541 let SchedRW = [WriteLoad] in {
7542 let Predicates = [HasAVX] in
7543 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
7544 "vmovntdqa\t{$src, $dst|$dst, $src}",
7545 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
7546 VEX;
7547 let Predicates = [HasAVX2] in
7548 def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
7549 "vmovntdqa\t{$src, $dst|$dst, $src}",
7550 [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>,
7551 VEX, VEX_L;
7552 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
7553 "movntdqa\t{$src, $dst|$dst, $src}",
7554 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
7555 } // SchedRW
7557 //===----------------------------------------------------------------------===//
7558 // SSE4.2 - Compare Instructions
7559 //===----------------------------------------------------------------------===//
7561 /// SS42I_binop_rm - Simple SSE 4.2 binary operator
7562 multiclass SS42I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
7563 ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
7564 X86MemOperand x86memop, bit Is2Addr = 1> {
7565 def rr : SS428I<opc, MRMSrcReg, (outs RC:$dst),
7566 (ins RC:$src1, RC:$src2),
7567 !if(Is2Addr,
7568 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7569 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7570 [(set RC:$dst, (OpVT (OpNode RC:$src1, RC:$src2)))]>;
7571 def rm : SS428I<opc, MRMSrcMem, (outs RC:$dst),
7572 (ins RC:$src1, x86memop:$src2),
7573 !if(Is2Addr,
7574 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7575 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7576 [(set RC:$dst,
7577 (OpVT (OpNode RC:$src1, (memop_frag addr:$src2))))]>;
7578 }
7580 let Predicates = [HasAVX] in
7581 defm VPCMPGTQ : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v2i64, VR128,
7582 loadv2i64, i128mem, 0>, VEX_4V;
7584 let Predicates = [HasAVX2] in
7585 defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256,
7586 loadv4i64, i256mem, 0>, VEX_4V, VEX_L;
7588 let Constraints = "$src1 = $dst" in
7589 defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128,
7590 memopv2i64, i128mem>;
7592 //===----------------------------------------------------------------------===//
7593 // SSE4.2 - String/text Processing Instructions
7594 //===----------------------------------------------------------------------===//
7596 // Packed Compare Implicit Length Strings, Return Mask
7597 multiclass pseudo_pcmpistrm<string asm> {
7598 def REG : PseudoI<(outs VR128:$dst),
7599 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7600 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
7601 imm:$src3))]>;
7602 def MEM : PseudoI<(outs VR128:$dst),
7603 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7604 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1,
7605 (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
7606 }
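// The pseudos above carry the selection patterns; a custom inserter expands
// them to the real instructions below, which implicitly define XMM0 (or ECX
// for the index forms) and EFLAGS. The same scheme is used for the
// pcmpestrm/pcmpistri/pcmpestri variants that follow.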
7608 let Defs = [EFLAGS], usesCustomInserter = 1 in {
7609 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
7610 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[UseSSE42]>;
7611 }
7613 multiclass pcmpistrm_SS42AI<string asm> {
7614 def rr : SS42AI<0x62, MRMSrcReg, (outs),
7615 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7616 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7617 []>, Sched<[WritePCmpIStrM]>;
7618 let mayLoad = 1 in
7619 def rm :SS42AI<0x62, MRMSrcMem, (outs),
7620 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7621 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7622 []>, Sched<[WritePCmpIStrMLd, ReadAfterLd]>;
7623 }
7625 let Defs = [XMM0, EFLAGS], hasSideEffects = 0 in {
7626 let Predicates = [HasAVX] in
7627 defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX;
7628 defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm">;
7629 }
7631 // Packed Compare Explicit Length Strings, Return Mask
7632 multiclass pseudo_pcmpestrm<string asm> {
7633 def REG : PseudoI<(outs VR128:$dst),
7634 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
7635 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
7636 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
7637 def MEM : PseudoI<(outs VR128:$dst),
7638 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
7639 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX,
7640 (bc_v16i8 (memopv2i64 addr:$src3)), EDX, imm:$src5))]>;
7641 }
7643 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
7644 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
7645 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[UseSSE42]>;
7646 }
7648 multiclass SS42AI_pcmpestrm<string asm> {
7649 def rr : SS42AI<0x60, MRMSrcReg, (outs),
7650 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
7651 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7652 []>, Sched<[WritePCmpEStrM]>;
7653 let mayLoad = 1 in
7654 def rm : SS42AI<0x60, MRMSrcMem, (outs),
7655 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
7656 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7657 []>, Sched<[WritePCmpEStrMLd, ReadAfterLd]>;
7658 }
7660 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
7661 let Predicates = [HasAVX] in
7662 defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX;
7663 defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">;
7664 }
7666 // Packed Compare Implicit Length Strings, Return Index
7667 multiclass pseudo_pcmpistri<string asm> {
7668 def REG : PseudoI<(outs GR32:$dst),
7669 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7670 [(set GR32:$dst, EFLAGS,
7671 (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>;
7672 def MEM : PseudoI<(outs GR32:$dst),
7673 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7674 [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1,
7675 (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>;
7676 }
7678 let Defs = [EFLAGS], usesCustomInserter = 1 in {
7679 defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI">, Requires<[HasAVX]>;
7680 defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI">, Requires<[UseSSE42]>;
7681 }
7683 multiclass SS42AI_pcmpistri<string asm> {
7684 def rr : SS42AI<0x63, MRMSrcReg, (outs),
7685 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7686 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7687 []>, Sched<[WritePCmpIStrI]>;
7688 let mayLoad = 1 in
7689 def rm : SS42AI<0x63, MRMSrcMem, (outs),
7690 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7691 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
7692 []>, Sched<[WritePCmpIStrILd, ReadAfterLd]>;
7693 }
7695 let Defs = [ECX, EFLAGS], hasSideEffects = 0 in {
7696 let Predicates = [HasAVX] in
7697 defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX;
7698 defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">;
7699 }
7701 // Packed Compare Explicit Length Strings, Return Index
7702 multiclass pseudo_pcmpestri<string asm> {
7703 def REG : PseudoI<(outs GR32:$dst),
7704 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
7705 [(set GR32:$dst, EFLAGS,
7706 (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
7707 def MEM : PseudoI<(outs GR32:$dst),
7708 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
7709 [(set GR32:$dst, EFLAGS,
7710 (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (memopv2i64 addr:$src3)), EDX,
7711 imm:$src5))]>;
7712 }
7714 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
7715 defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI">, Requires<[HasAVX]>;
7716 defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI">, Requires<[UseSSE42]>;
7717 }
7719 multiclass SS42AI_pcmpestri<string asm> {
7720 def rr : SS42AI<0x61, MRMSrcReg, (outs),
7721 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
7722 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7723 []>, Sched<[WritePCmpEStrI]>;
7724 let mayLoad = 1 in
7725 def rm : SS42AI<0x61, MRMSrcMem, (outs),
7726 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
7727 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
7728 []>, Sched<[WritePCmpEStrILd, ReadAfterLd]>;
7729 }
7731 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], hasSideEffects = 0 in {
7732 let Predicates = [HasAVX] in
7733 defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX;
7734 defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">;
7735 }
7737 //===----------------------------------------------------------------------===//
7738 // SSE4.2 - CRC Instructions
7739 //===----------------------------------------------------------------------===//
7741 // No CRC instructions have AVX equivalents
7743 // crc intrinsic instruction
7744 // These instructions come only in rr and rm forms; the variants differ
7745 // only in the sizes of r and m.
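// Note that crc32 computes CRC-32C (the Castagnoli polynomial, 0x11EDC6F41),
// not the CRC-32 polynomial used by zip and zlib.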
7746 class SS42I_crc32r<bits<8> opc, string asm, RegisterClass RCOut,
7747 RegisterClass RCIn, SDPatternOperator Int> :
7748 SS42FI<opc, MRMSrcReg, (outs RCOut:$dst), (ins RCOut:$src1, RCIn:$src2),
7749 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
7750 [(set RCOut:$dst, (Int RCOut:$src1, RCIn:$src2))], IIC_CRC32_REG>,
7751 Sched<[WriteFAdd]>;
7753 class SS42I_crc32m<bits<8> opc, string asm, RegisterClass RCOut,
7754 X86MemOperand x86memop, SDPatternOperator Int> :
7755 SS42FI<opc, MRMSrcMem, (outs RCOut:$dst), (ins RCOut:$src1, x86memop:$src2),
7756 !strconcat(asm, "\t{$src2, $src1|$src1, $src2}"),
7757 [(set RCOut:$dst, (Int RCOut:$src1, (load addr:$src2)))],
7758 IIC_CRC32_MEM>, Sched<[WriteFAddLd, ReadAfterLd]>;
7760 let Constraints = "$src1 = $dst" in {
7761 def CRC32r32m8 : SS42I_crc32m<0xF0, "crc32{b}", GR32, i8mem,
7762 int_x86_sse42_crc32_32_8>;
7763 def CRC32r32r8 : SS42I_crc32r<0xF0, "crc32{b}", GR32, GR8,
7764 int_x86_sse42_crc32_32_8>;
7765 def CRC32r32m16 : SS42I_crc32m<0xF1, "crc32{w}", GR32, i16mem,
7766 int_x86_sse42_crc32_32_16>, OpSize16;
7767 def CRC32r32r16 : SS42I_crc32r<0xF1, "crc32{w}", GR32, GR16,
7768 int_x86_sse42_crc32_32_16>, OpSize16;
7769 def CRC32r32m32 : SS42I_crc32m<0xF1, "crc32{l}", GR32, i32mem,
7770 int_x86_sse42_crc32_32_32>, OpSize32;
7771 def CRC32r32r32 : SS42I_crc32r<0xF1, "crc32{l}", GR32, GR32,
7772 int_x86_sse42_crc32_32_32>, OpSize32;
7773 def CRC32r64m64 : SS42I_crc32m<0xF1, "crc32{q}", GR64, i64mem,
7774 int_x86_sse42_crc32_64_64>, REX_W;
7775 def CRC32r64r64 : SS42I_crc32r<0xF1, "crc32{q}", GR64, GR64,
7776 int_x86_sse42_crc32_64_64>, REX_W;
7777 let hasSideEffects = 0 in {
7778 let mayLoad = 1 in
7779 def CRC32r64m8 : SS42I_crc32m<0xF0, "crc32{b}", GR64, i8mem,
7780 null_frag>, REX_W;
7781 def CRC32r64r8 : SS42I_crc32r<0xF0, "crc32{b}", GR64, GR8,
7782 null_frag>, REX_W;
7783 }
7784 }
7786 //===----------------------------------------------------------------------===//
7787 // SHA-NI Instructions
7788 //===----------------------------------------------------------------------===//
7790 multiclass SHAI_binop<bits<8> Opc, string OpcodeStr, Intrinsic IntId,
7791 bit UsesXMM0 = 0> {
7792 def rr : I<Opc, MRMSrcReg, (outs VR128:$dst),
7793 (ins VR128:$src1, VR128:$src2),
7794 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7795 [!if(UsesXMM0,
7796 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0)),
7797 (set VR128:$dst, (IntId VR128:$src1, VR128:$src2)))]>, T8;
7799 def rm : I<Opc, MRMSrcMem, (outs VR128:$dst),
7800 (ins VR128:$src1, i128mem:$src2),
7801 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7802 [!if(UsesXMM0,
7803 (set VR128:$dst, (IntId VR128:$src1,
7804 (bc_v4i32 (memopv2i64 addr:$src2)), XMM0)),
7805 (set VR128:$dst, (IntId VR128:$src1,
7806 (bc_v4i32 (memopv2i64 addr:$src2)))))]>, T8;
7807 }
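// The sha1rnds4 immediate (0-3) selects which group of four SHA-1 rounds is
// performed, i.e. the round constant and combining function for that group.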
7809 let Constraints = "$src1 = $dst", Predicates = [HasSHA] in {
7810 def SHA1RNDS4rri : Ii8<0xCC, MRMSrcReg, (outs VR128:$dst),
7811 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7812 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7813 [(set VR128:$dst,
7814 (int_x86_sha1rnds4 VR128:$src1, VR128:$src2,
7815 (i8 imm:$src3)))]>, TA;
7816 def SHA1RNDS4rmi : Ii8<0xCC, MRMSrcMem, (outs VR128:$dst),
7817 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7818 "sha1rnds4\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7819 [(set VR128:$dst,
7820 (int_x86_sha1rnds4 VR128:$src1,
7821 (bc_v4i32 (memopv2i64 addr:$src2)),
7822 (i8 imm:$src3)))]>, TA;
7824 defm SHA1NEXTE : SHAI_binop<0xC8, "sha1nexte", int_x86_sha1nexte>;
7825 defm SHA1MSG1 : SHAI_binop<0xC9, "sha1msg1", int_x86_sha1msg1>;
7826 defm SHA1MSG2 : SHAI_binop<0xCA, "sha1msg2", int_x86_sha1msg2>;
7828 let Uses = [XMM0] in
7829 defm SHA256RNDS2 : SHAI_binop<0xCB, "sha256rnds2", int_x86_sha256rnds2, 1>;
7831 defm SHA256MSG1 : SHAI_binop<0xCC, "sha256msg1", int_x86_sha256msg1>;
7832 defm SHA256MSG2 : SHAI_binop<0xCD, "sha256msg2", int_x86_sha256msg2>;
7833 }
7835 // Aliases with explicit %xmm0
7836 def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7837 (SHA256RNDS2rr VR128:$dst, VR128:$src2)>;
7838 def : InstAlias<"sha256rnds2\t{%xmm0, $src2, $dst|$dst, $src2, xmm0}",
7839 (SHA256RNDS2rm VR128:$dst, i128mem:$src2)>;
7841 //===----------------------------------------------------------------------===//
7842 // AES-NI Instructions
7843 //===----------------------------------------------------------------------===//
7845 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
7846 Intrinsic IntId128, bit Is2Addr = 1> {
7847 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
7848 (ins VR128:$src1, VR128:$src2),
7849 !if(Is2Addr,
7850 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7851 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7852 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
7853 Sched<[WriteAESDecEnc]>;
7854 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
7855 (ins VR128:$src1, i128mem:$src2),
7856 !if(Is2Addr,
7857 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
7858 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
7859 [(set VR128:$dst,
7860 (IntId128 VR128:$src1, (memopv2i64 addr:$src2)))]>,
7861 Sched<[WriteAESDecEncLd, ReadAfterLd]>;
7862 }
7864 // Perform One Round of an AES Encryption/Decryption Flow
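// aesenc/aesdec perform one full round on the state in $src1 using the round
// key in $src2; the *last variants omit the MixColumns (resp. InvMixColumns)
// step, as required for the final round.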
7865 let Predicates = [HasAVX, HasAES] in {
7866 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
7867 int_x86_aesni_aesenc, 0>, VEX_4V;
7868 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
7869 int_x86_aesni_aesenclast, 0>, VEX_4V;
7870 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
7871 int_x86_aesni_aesdec, 0>, VEX_4V;
7872 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
7873 int_x86_aesni_aesdeclast, 0>, VEX_4V;
7874 }
7876 let Constraints = "$src1 = $dst" in {
7877 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
7878 int_x86_aesni_aesenc>;
7879 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
7880 int_x86_aesni_aesenclast>;
7881 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
7882 int_x86_aesni_aesdec>;
7883 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
7884 int_x86_aesni_aesdeclast>;
7885 }
7887 // Perform the AES InvMixColumn Transformation
7888 let Predicates = [HasAVX, HasAES] in {
7889 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7890 (ins VR128:$src1),
7891 "vaesimc\t{$src1, $dst|$dst, $src1}",
7892 [(set VR128:$dst,
7893 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>,
7894 VEX;
7895 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7896 (ins i128mem:$src1),
7897 "vaesimc\t{$src1, $dst|$dst, $src1}",
7898 [(set VR128:$dst, (int_x86_aesni_aesimc (loadv2i64 addr:$src1)))]>,
7899 Sched<[WriteAESIMCLd]>, VEX;
7900 }
7901 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
7902 (ins VR128:$src1),
7903 "aesimc\t{$src1, $dst|$dst, $src1}",
7904 [(set VR128:$dst,
7905 (int_x86_aesni_aesimc VR128:$src1))]>, Sched<[WriteAESIMC]>;
7906 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
7907 (ins i128mem:$src1),
7908 "aesimc\t{$src1, $dst|$dst, $src1}",
7909 [(set VR128:$dst, (int_x86_aesni_aesimc (memopv2i64 addr:$src1)))]>,
7910 Sched<[WriteAESIMCLd]>;
7912 // AES Round Key Generation Assist
7913 let Predicates = [HasAVX, HasAES] in {
7914 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7915 (ins VR128:$src1, i8imm:$src2),
7916 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7917 [(set VR128:$dst,
7918 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7919 Sched<[WriteAESKeyGen]>, VEX;
7920 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7921 (ins i128mem:$src1, i8imm:$src2),
7922 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7923 [(set VR128:$dst,
7924 (int_x86_aesni_aeskeygenassist (loadv2i64 addr:$src1), imm:$src2))]>,
7925 Sched<[WriteAESKeyGenLd]>, VEX;
7926 }
7927 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
7928 (ins VR128:$src1, i8imm:$src2),
7929 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7930 [(set VR128:$dst,
7931 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
7932 Sched<[WriteAESKeyGen]>;
7933 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
7934 (ins i128mem:$src1, i8imm:$src2),
7935 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
7936 [(set VR128:$dst,
7937 (int_x86_aesni_aeskeygenassist (memopv2i64 addr:$src1), imm:$src2))]>,
7938 Sched<[WriteAESKeyGenLd]>;
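
// A minimal C sketch, assuming <wmmintrin.h>: the round constant is the
// i8imm operand above and must be a compile-time constant.
//
//   #include <wmmintrin.h>
//   __m128i aes128_keygen_step(__m128i key) {
//     return _mm_aeskeygenassist_si128(key, 0x01);  // RCON for round 1
//   }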
7940 //===----------------------------------------------------------------------===//
7941 // PCLMUL Instructions
7942 //===----------------------------------------------------------------------===//
7944 // AVX Carry-less Multiplication instructions
7945 def VPCLMULQDQrr : AVXPCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7946 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7947 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7948 [(set VR128:$dst,
7949 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))]>,
7950 Sched<[WriteCLMul]>;
7952 def VPCLMULQDQrm : AVXPCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7953 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7954 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
7955 [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
7956 (loadv2i64 addr:$src2), imm:$src3))]>,
7957 Sched<[WriteCLMulLd, ReadAfterLd]>;
7959 // Carry-less Multiplication instructions
7960 let Constraints = "$src1 = $dst" in {
7961 def PCLMULQDQrr : PCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
7962 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
7963 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7964 [(set VR128:$dst,
7965 (int_x86_pclmulqdq VR128:$src1, VR128:$src2, imm:$src3))],
7966 IIC_SSE_PCLMULQDQ_RR>, Sched<[WriteCLMul]>;
7968 def PCLMULQDQrm : PCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
7969 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
7970 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
7971 [(set VR128:$dst, (int_x86_pclmulqdq VR128:$src1,
7972 (memopv2i64 addr:$src2), imm:$src3))],
7973 IIC_SSE_PCLMULQDQ_RM>,
7974 Sched<[WriteCLMulLd, ReadAfterLd]>;
7975 } // Constraints = "$src1 = $dst"
7978 multiclass pclmul_alias<string asm, int immop> {
7979 def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
7980 (PCLMULQDQrr VR128:$dst, VR128:$src, immop), 0>;
7982 def : InstAlias<!strconcat("pclmul", asm, "dq {$src, $dst|$dst, $src}"),
7983 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop), 0>;
7985 def : InstAlias<!strconcat("vpclmul", asm,
7986 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
7987 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop),
7988 0>;
7990 def : InstAlias<!strconcat("vpclmul", asm,
7991 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
7992 (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop),
7993 0>;
7994 }
7995 defm : pclmul_alias<"hqhq", 0x11>;
7996 defm : pclmul_alias<"hqlq", 0x01>;
7997 defm : pclmul_alias<"lqhq", 0x10>;
7998 defm : pclmul_alias<"lqlq", 0x00>;
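
// A minimal C sketch, assuming <wmmintrin.h>: bit 0 of the immediate picks
// the quadword of the first source and bit 4 that of the second, so 0x11
// is the pclmulhqhqdq alias defined above.
//
//   #include <wmmintrin.h>
//   __m128i clmul_high_high(__m128i a, __m128i b) {
//     return _mm_clmulepi64_si128(a, b, 0x11);
//   }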
8000 //===----------------------------------------------------------------------===//
8001 // SSE4A Instructions
8002 //===----------------------------------------------------------------------===//
8004 let Predicates = [HasSSE4A] in {
8006 let Constraints = "$src = $dst" in {
8007 def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
8008 (ins VR128:$src, i8imm:$len, i8imm:$idx),
8009 "extrq\t{$idx, $len, $src|$src, $len, $idx}",
8010 [(set VR128:$dst, (int_x86_sse4a_extrqi VR128:$src, imm:$len,
8011 imm:$idx))]>, PD;
8012 def EXTRQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
8013 (ins VR128:$src, VR128:$mask),
8014 "extrq\t{$mask, $src|$src, $mask}",
8015 [(set VR128:$dst, (int_x86_sse4a_extrq VR128:$src,
8016 VR128:$mask))]>, PD;
8018 def INSERTQI : Ii8<0x78, MRMSrcReg, (outs VR128:$dst),
8019 (ins VR128:$src, VR128:$src2, i8imm:$len, i8imm:$idx),
8020 "insertq\t{$idx, $len, $src2, $src|$src, $src2, $len, $idx}",
8021 [(set VR128:$dst, (int_x86_sse4a_insertqi VR128:$src,
8022 VR128:$src2, imm:$len, imm:$idx))]>, XD;
8023 def INSERTQ : I<0x79, MRMSrcReg, (outs VR128:$dst),
8024 (ins VR128:$src, VR128:$mask),
8025 "insertq\t{$mask, $src|$src, $mask}",
8026 [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
8027 VR128:$mask))]>, XD;
8028 }
8030 def MOVNTSS : I<0x2B, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
8031 "movntss\t{$src, $dst|$dst, $src}",
8032 [(int_x86_sse4a_movnt_ss addr:$dst, VR128:$src)]>, XS;
8034 def MOVNTSD : I<0x2B, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
8035 "movntsd\t{$src, $dst|$dst, $src}",
8036 [(int_x86_sse4a_movnt_sd addr:$dst, VR128:$src)]>, XD;
8037 }
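
// A minimal C sketch of MOVNTSS, assuming the <ammintrin.h> names used by
// GCC and Clang for SSE4A (an assumption; check the headers of the target
// toolchain):
//
//   #include <ammintrin.h>
//   void store_one_float_nontemporal(float *dst, __m128 v) {
//     _mm_stream_ss(dst, v);  // scalar non-temporal store
//   }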
8039 //===----------------------------------------------------------------------===//
8040 // AVX Instructions
8041 //===----------------------------------------------------------------------===//
8043 //===----------------------------------------------------------------------===//
8044 // VBROADCAST - Load from memory and broadcast to all elements of the
8045 // destination operand
8046 //
8047 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
8048 X86MemOperand x86memop, Intrinsic Int, SchedWrite Sched> :
8049 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
8050 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8051 [(set RC:$dst, (Int addr:$src))]>, Sched<[Sched]>, VEX;
8053 class avx_broadcast_no_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
8054 X86MemOperand x86memop, ValueType VT,
8055 PatFrag ld_frag, SchedWrite Sched> :
8056 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
8057 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8058 [(set RC:$dst, (VT (X86VBroadcast (ld_frag addr:$src))))]>,
8059 Sched<[Sched]>, VEX {
8060 let mayLoad = 1;
8061 }
8063 // AVX2 adds register forms
8064 class avx2_broadcast_reg<bits<8> opc, string OpcodeStr, RegisterClass RC,
8065 Intrinsic Int, SchedWrite Sched> :
8066 AVX28I<opc, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
8067 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8068 [(set RC:$dst, (Int VR128:$src))]>, Sched<[Sched]>, VEX;
8070 let ExeDomain = SSEPackedSingle in {
8071 def VBROADCASTSSrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR128,
8072 f32mem, v4f32, loadf32, WriteLoad>;
8073 def VBROADCASTSSYrm : avx_broadcast_no_int<0x18, "vbroadcastss", VR256,
8074 f32mem, v8f32, loadf32,
8075 WriteFShuffleLd>, VEX_L;
8076 }
8077 let ExeDomain = SSEPackedDouble in
8078 def VBROADCASTSDYrm : avx_broadcast_no_int<0x19, "vbroadcastsd", VR256, f64mem,
8079 v4f64, loadf64, WriteFShuffleLd>, VEX_L;
8080 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
8081 int_x86_avx_vbroadcastf128_pd_256,
8082 WriteFShuffleLd>, VEX_L;
8084 let ExeDomain = SSEPackedSingle in {
8085 def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128,
8086 int_x86_avx2_vbroadcast_ss_ps,
8087 WriteFShuffle>;
8088 def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256,
8089 int_x86_avx2_vbroadcast_ss_ps_256,
8090 WriteFShuffle256>, VEX_L;
8091 }
8092 let ExeDomain = SSEPackedDouble in
8093 def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256,
8094 int_x86_avx2_vbroadcast_sd_pd_256,
8095 WriteFShuffle256>, VEX_L;
8097 let Predicates = [HasAVX2] in
8098 def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem,
8099 int_x86_avx2_vbroadcasti128, WriteLoad>,
8100 VEX_L;
8102 let Predicates = [HasAVX] in
8103 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
8104 (VBROADCASTF128 addr:$src)>;
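
// A minimal C sketch, assuming <immintrin.h> and an -mavx build:
//
//   #include <immintrin.h>
//   __m256 splat_from_memory(const float *p) {
//     return _mm256_broadcast_ss(p);  // VBROADCASTSS (memory form)
//   }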
8107 //===----------------------------------------------------------------------===//
8108 // VINSERTF128 - Insert packed floating-point values
8109 //
8110 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
8111 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
8112 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
8113 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8114 []>, Sched<[WriteFShuffle]>, VEX_4V, VEX_L;
8115 let mayLoad = 1 in
8116 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
8117 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
8118 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8119 []>, Sched<[WriteFShuffleLd, ReadAfterLd]>, VEX_4V, VEX_L;
8120 }
8122 let Predicates = [HasAVX] in {
8123 def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
8124 (iPTR imm)),
8125 (VINSERTF128rr VR256:$src1, VR128:$src2,
8126 (INSERT_get_vinsert128_imm VR256:$ins))>;
8127 def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
8128 (iPTR imm)),
8129 (VINSERTF128rr VR256:$src1, VR128:$src2,
8130 (INSERT_get_vinsert128_imm VR256:$ins))>;
8132 def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2),
8133 (iPTR imm)),
8134 (VINSERTF128rm VR256:$src1, addr:$src2,
8135 (INSERT_get_vinsert128_imm VR256:$ins))>;
8136 def : Pat<(vinsert128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2),
8137 (iPTR imm)),
8138 (VINSERTF128rm VR256:$src1, addr:$src2,
8139 (INSERT_get_vinsert128_imm VR256:$ins))>;
8140 }
8142 let Predicates = [HasAVX1Only] in {
8143 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
8144 (iPTR imm)),
8145 (VINSERTF128rr VR256:$src1, VR128:$src2,
8146 (INSERT_get_vinsert128_imm VR256:$ins))>;
8147 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
8148 (iPTR imm)),
8149 (VINSERTF128rr VR256:$src1, VR128:$src2,
8150 (INSERT_get_vinsert128_imm VR256:$ins))>;
8151 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
8152 (iPTR imm)),
8153 (VINSERTF128rr VR256:$src1, VR128:$src2,
8154 (INSERT_get_vinsert128_imm VR256:$ins))>;
8155 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
8156 (iPTR imm)),
8157 (VINSERTF128rr VR256:$src1, VR128:$src2,
8158 (INSERT_get_vinsert128_imm VR256:$ins))>;
8160 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
8161 (iPTR imm)),
8162 (VINSERTF128rm VR256:$src1, addr:$src2,
8163 (INSERT_get_vinsert128_imm VR256:$ins))>;
8164 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
8165 (bc_v4i32 (loadv2i64 addr:$src2)),
8166 (iPTR imm)),
8167 (VINSERTF128rm VR256:$src1, addr:$src2,
8168 (INSERT_get_vinsert128_imm VR256:$ins))>;
8169 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
8170 (bc_v16i8 (loadv2i64 addr:$src2)),
8171 (iPTR imm)),
8172 (VINSERTF128rm VR256:$src1, addr:$src2,
8173 (INSERT_get_vinsert128_imm VR256:$ins))>;
8174 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
8175 (bc_v8i16 (loadv2i64 addr:$src2)),
8176 (iPTR imm)),
8177 (VINSERTF128rm VR256:$src1, addr:$src2,
8178 (INSERT_get_vinsert128_imm VR256:$ins))>;
8179 }
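
// A minimal C sketch, assuming <immintrin.h>: the immediate selects the
// destination 128-bit lane, mirroring the i8imm operand above.
//
//   #include <immintrin.h>
//   __m256 put_in_high_lane(__m256 acc, __m128 x) {
//     return _mm256_insertf128_ps(acc, x, 1);  // VINSERTF128
//   }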
8181 //===----------------------------------------------------------------------===//
8182 // VEXTRACTF128 - Extract packed floating-point values
8183 //
8184 let hasSideEffects = 0, ExeDomain = SSEPackedSingle in {
8185 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
8186 (ins VR256:$src1, i8imm:$src2),
8187 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8188 []>, Sched<[WriteFShuffle]>, VEX, VEX_L;
8189 let mayStore = 1 in
8190 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
8191 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
8192 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8193 []>, Sched<[WriteStore]>, VEX, VEX_L;
8194 }
8196 // AVX1 patterns
8197 let Predicates = [HasAVX] in {
8198 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8199 (v4f32 (VEXTRACTF128rr
8200 (v8f32 VR256:$src1),
8201 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8202 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8203 (v2f64 (VEXTRACTF128rr
8204 (v4f64 VR256:$src1),
8205 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8207 def : Pat<(store (v4f32 (vextract128_extract:$ext (v8f32 VR256:$src1),
8208 (iPTR imm))), addr:$dst),
8209 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8210 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8211 def : Pat<(store (v2f64 (vextract128_extract:$ext (v4f64 VR256:$src1),
8212 (iPTR imm))), addr:$dst),
8213 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8214 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8215 }
8217 let Predicates = [HasAVX1Only] in {
8218 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8219 (v2i64 (VEXTRACTF128rr
8220 (v4i64 VR256:$src1),
8221 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8222 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8223 (v4i32 (VEXTRACTF128rr
8224 (v8i32 VR256:$src1),
8225 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8226 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8227 (v8i16 (VEXTRACTF128rr
8228 (v16i16 VR256:$src1),
8229 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8230 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8231 (v16i8 (VEXTRACTF128rr
8232 (v32i8 VR256:$src1),
8233 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8235 def : Pat<(alignedstore (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
8236 (iPTR imm))), addr:$dst),
8237 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8238 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8239 def : Pat<(alignedstore (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
8240 (iPTR imm))), addr:$dst),
8241 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8242 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8243 def : Pat<(alignedstore (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
8244 (iPTR imm))), addr:$dst),
8245 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8246 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8247 def : Pat<(alignedstore (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
8248 (iPTR imm))), addr:$dst),
8249 (VEXTRACTF128mr addr:$dst, VR256:$src1,
8250 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8251 }
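
// A minimal C sketch, assuming <immintrin.h>:
//
//   #include <immintrin.h>
//   __m128d high_lane(__m256d v) {
//     return _mm256_extractf128_pd(v, 1);  // VEXTRACTF128, imm = lane
//   }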
8253 //===----------------------------------------------------------------------===//
8254 // VMASKMOV - Conditional SIMD Packed Loads and Stores
8255 //
8256 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
8257 Intrinsic IntLd, Intrinsic IntLd256,
8258 Intrinsic IntSt, Intrinsic IntSt256> {
8259 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
8260 (ins VR128:$src1, f128mem:$src2),
8261 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8262 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
8263 VEX_4V;
8264 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
8265 (ins VR256:$src1, f256mem:$src2),
8266 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8267 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
8268 VEX_4V, VEX_L;
8269 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
8270 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
8271 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8272 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
8273 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
8274 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
8275 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8276 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
8277 }
8279 let ExeDomain = SSEPackedSingle in
8280 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
8281 int_x86_avx_maskload_ps,
8282 int_x86_avx_maskload_ps_256,
8283 int_x86_avx_maskstore_ps,
8284 int_x86_avx_maskstore_ps_256>;
8285 let ExeDomain = SSEPackedDouble in
8286 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
8287 int_x86_avx_maskload_pd,
8288 int_x86_avx_maskload_pd_256,
8289 int_x86_avx_maskstore_pd,
8290 int_x86_avx_maskstore_pd_256>;
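
// A minimal C sketch, assuming <immintrin.h>: only elements whose mask
// sign bit is set are loaded (the rest read as zero), and masked-off
// stores leave memory untouched, so the masked lanes never fault.
//
//   #include <immintrin.h>
//   __m256 load_selected(const float *p, __m256i mask) {
//     return _mm256_maskload_ps(p, mask);  // VMASKMOVPS (load form)
//   }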
8292 //===----------------------------------------------------------------------===//
8293 // VPERMIL - Permute Single and Double Floating-Point Values
8294 //
8295 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
8296 RegisterClass RC, X86MemOperand x86memop_f,
8297 X86MemOperand x86memop_i, PatFrag i_frag,
8298 Intrinsic IntVar, ValueType vt> {
8299 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
8300 (ins RC:$src1, RC:$src2),
8301 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8302 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V,
8303 Sched<[WriteFShuffle]>;
8304 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
8305 (ins RC:$src1, x86memop_i:$src2),
8306 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8307 [(set RC:$dst, (IntVar RC:$src1,
8308 (bitconvert (i_frag addr:$src2))))]>, VEX_4V,
8309 Sched<[WriteFShuffleLd, ReadAfterLd]>;
8311 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
8312 (ins RC:$src1, i8imm:$src2),
8313 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8314 [(set RC:$dst, (vt (X86VPermilpi RC:$src1, (i8 imm:$src2))))]>, VEX,
8315 Sched<[WriteFShuffle]>;
8316 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
8317 (ins x86memop_f:$src1, i8imm:$src2),
8318 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8319 [(set RC:$dst,
8320 (vt (X86VPermilpi (memop addr:$src1), (i8 imm:$src2))))]>, VEX,
8321 Sched<[WriteFShuffleLd]>;
8322 }
8324 let ExeDomain = SSEPackedSingle in {
8325 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
8326 loadv2i64, int_x86_avx_vpermilvar_ps, v4f32>;
8327 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
8328 loadv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L;
8329 }
8330 let ExeDomain = SSEPackedDouble in {
8331 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
8332 loadv2i64, int_x86_avx_vpermilvar_pd, v2f64>;
8333 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
8334 loadv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L;
8335 }
8337 let Predicates = [HasAVX] in {
8338 def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (v8i32 VR256:$src2))),
8339 (VPERMILPSYrr VR256:$src1, VR256:$src2)>;
8340 def : Pat<(v8f32 (X86VPermilpv VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)))),
8341 (VPERMILPSYrm VR256:$src1, addr:$src2)>;
8342 def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (v4i64 VR256:$src2))),
8343 (VPERMILPDYrr VR256:$src1, VR256:$src2)>;
8344 def : Pat<(v4f64 (X86VPermilpv VR256:$src1, (loadv4i64 addr:$src2))),
8345 (VPERMILPDYrm VR256:$src1, addr:$src2)>;
8347 def : Pat<(v8i32 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
8348 (VPERMILPSYri VR256:$src1, imm:$imm)>;
8349 def : Pat<(v4i64 (X86VPermilpi VR256:$src1, (i8 imm:$imm))),
8350 (VPERMILPDYri VR256:$src1, imm:$imm)>;
8351 def : Pat<(v8i32 (X86VPermilpi (bc_v8i32 (loadv4i64 addr:$src1)),
8352 (i8 imm:$imm))),
8353 (VPERMILPSYmi addr:$src1, imm:$imm)>;
8354 def : Pat<(v4i64 (X86VPermilpi (loadv4i64 addr:$src1), (i8 imm:$imm))),
8355 (VPERMILPDYmi addr:$src1, imm:$imm)>;
8357 def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (v4i32 VR128:$src2))),
8358 (VPERMILPSrr VR128:$src1, VR128:$src2)>;
8359 def : Pat<(v4f32 (X86VPermilpv VR128:$src1, (bc_v4i32 (loadv2i64 addr:$src2)))),
8360 (VPERMILPSrm VR128:$src1, addr:$src2)>;
8361 def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (v2i64 VR128:$src2))),
8362 (VPERMILPDrr VR128:$src1, VR128:$src2)>;
8363 def : Pat<(v2f64 (X86VPermilpv VR128:$src1, (loadv2i64 addr:$src2))),
8364 (VPERMILPDrm VR128:$src1, addr:$src2)>;
8366 def : Pat<(v2i64 (X86VPermilpi VR128:$src1, (i8 imm:$imm))),
8367 (VPERMILPDri VR128:$src1, imm:$imm)>;
8368 def : Pat<(v2i64 (X86VPermilpi (loadv2i64 addr:$src1), (i8 imm:$imm))),
8369 (VPERMILPDmi addr:$src1, imm:$imm)>;
8370 }
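
// A minimal C sketch, assuming <immintrin.h>: the immediate form permutes
// within each 128-bit lane independently; this reverses the four floats
// of both lanes.
//
//   #include <immintrin.h>
//   __m256 reverse_within_lanes(__m256 v) {
//     return _mm256_permute_ps(v, _MM_SHUFFLE(0, 1, 2, 3));  // VPERMILPS
//   }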
8372 //===----------------------------------------------------------------------===//
8373 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
8374 //
8375 let ExeDomain = SSEPackedSingle in {
8376 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
8377 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
8378 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8379 [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2,
8380 (i8 imm:$src3))))]>, VEX_4V, VEX_L,
8381 Sched<[WriteFShuffle]>;
8382 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
8383 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
8384 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8385 [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv8f32 addr:$src2),
8386 (i8 imm:$src3)))]>, VEX_4V, VEX_L,
8387 Sched<[WriteFShuffleLd, ReadAfterLd]>;
8388 }
8390 let Predicates = [HasAVX] in {
8391 def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8392 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8393 def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1,
8394 (loadv4f64 addr:$src2), (i8 imm:$imm))),
8395 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8396 }
8398 let Predicates = [HasAVX1Only] in {
8399 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8400 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8401 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8402 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8403 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8404 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8405 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8406 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8408 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1,
8409 (bc_v8i32 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8410 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8411 def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1,
8412 (loadv4i64 addr:$src2), (i8 imm:$imm))),
8413 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8414 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1,
8415 (bc_v32i8 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8416 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8417 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
8418 (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8419 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>;
8420 }
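
// A minimal C sketch, assuming <immintrin.h>: each nibble of the immediate
// selects one of the four source lanes (0-1 from the first operand, 2-3
// from the second), so 0x21 gives dst.lo = hi(a), dst.hi = lo(b).
//
//   #include <immintrin.h>
//   __m256d cross_lane_mix(__m256d a, __m256d b) {
//     return _mm256_permute2f128_pd(a, b, 0x21);  // VPERM2F128
//   }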
8422 //===----------------------------------------------------------------------===//
8423 // VZERO - Zero YMM registers
8424 //
8425 let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
8426 YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
8427 // Zero All YMM registers
8428 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
8429 [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;
8431 // Zero the upper 128 bits of all YMM registers
8432 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
8433 [(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
8434 }
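
// A minimal C sketch, assuming <immintrin.h>: issuing vzeroupper before
// calling legacy SSE code avoids AVX/SSE transition penalties.
//
//   #include <immintrin.h>
//   void before_legacy_sse_call(void) {
//     _mm256_zeroupper();  // VZEROUPPER
//   }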
8436 //===----------------------------------------------------------------------===//
8437 // Half precision conversion instructions
8438 //===----------------------------------------------------------------------===//
8439 multiclass f16c_ph2ps<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
8440 def rr : I<0x13, MRMSrcReg, (outs RC:$dst), (ins VR128:$src),
8441 "vcvtph2ps\t{$src, $dst|$dst, $src}",
8442 [(set RC:$dst, (Int VR128:$src))]>,
8443 T8PD, VEX, Sched<[WriteCvtF2F]>;
8444 let hasSideEffects = 0, mayLoad = 1 in
8445 def rm : I<0x13, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
8446 "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, T8PD, VEX,
8447 Sched<[WriteCvtF2FLd]>;
8448 }
8450 multiclass f16c_ps2ph<RegisterClass RC, X86MemOperand x86memop, Intrinsic Int> {
8451 def rr : Ii8<0x1D, MRMDestReg, (outs VR128:$dst),
8452 (ins RC:$src1, i32i8imm:$src2),
8453 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8454 [(set VR128:$dst, (Int RC:$src1, imm:$src2))]>,
8455 TAPD, VEX, Sched<[WriteCvtF2F]>;
8456 let hasSideEffects = 0, mayStore = 1,
8457 SchedRW = [WriteCvtF2FLd, WriteRMW] in
8458 def mr : Ii8<0x1D, MRMDestMem, (outs),
8459 (ins x86memop:$dst, RC:$src1, i32i8imm:$src2),
8460 "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8461 TAPD, VEX;
8462 }
8464 let Predicates = [HasF16C] in {
8465 defm VCVTPH2PS : f16c_ph2ps<VR128, f64mem, int_x86_vcvtph2ps_128>;
8466 defm VCVTPH2PSY : f16c_ph2ps<VR256, f128mem, int_x86_vcvtph2ps_256>, VEX_L;
8467 defm VCVTPS2PH : f16c_ps2ph<VR128, f64mem, int_x86_vcvtps2ph_128>;
8468 defm VCVTPS2PHY : f16c_ps2ph<VR256, f128mem, int_x86_vcvtps2ph_256>, VEX_L;
8470 // Match vcvtph2ps when its operand is a scalar i64 load.
8471 def : Pat<(int_x86_vcvtph2ps_128 (vzmovl_v2i64 addr:$src)),
8472 (VCVTPH2PSrm addr:$src)>;
8473 def : Pat<(int_x86_vcvtph2ps_128 (vzload_v2i64 addr:$src)),
8474 (VCVTPH2PSrm addr:$src)>;
8475 }
8477 // Patterns for matching conversions from float to half-float and vice versa.
8478 let Predicates = [HasF16C] in {
8479 def : Pat<(fp_to_f16 FR32:$src),
8480 (i16 (EXTRACT_SUBREG (VMOVPDI2DIrr (VCVTPS2PHrr
8481 (COPY_TO_REGCLASS FR32:$src, VR128), 0)), sub_16bit))>;
8483 def : Pat<(f16_to_fp GR16:$src),
8484 (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
8485 (COPY_TO_REGCLASS (MOVSX32rr16 GR16:$src), VR128)), FR32)) >;
8487 def : Pat<(f16_to_fp (i16 (fp_to_f16 FR32:$src))),
8488 (f32 (COPY_TO_REGCLASS (VCVTPH2PSrr
8489 (VCVTPS2PHrr (COPY_TO_REGCLASS FR32:$src, VR128), 0)), FR32)) >;
8490 }
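
// A minimal C sketch, assuming <immintrin.h> and an -mf16c build: the
// immediate of _mm_cvtps_ph selects the rounding mode.
//
//   #include <immintrin.h>
//   __m128 roundtrip_half(__m128 x) {
//     __m128i h = _mm_cvtps_ph(x, _MM_FROUND_TO_NEAREST_INT);  // VCVTPS2PH
//     return _mm_cvtph_ps(h);                                  // VCVTPH2PS
//   }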
8492 //===----------------------------------------------------------------------===//
8493 // AVX2 Instructions
8494 //===----------------------------------------------------------------------===//
8496 /// AVX2_binop_rmi_int - AVX2 binary operator with 8-bit immediate
8497 multiclass AVX2_binop_rmi_int<bits<8> opc, string OpcodeStr,
8498 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
8499 X86MemOperand x86memop> {
8500 let isCommutable = 1 in
8501 def rri : AVX2AIi8<opc, MRMSrcReg, (outs RC:$dst),
8502 (ins RC:$src1, RC:$src2, i8imm:$src3),
8503 !strconcat(OpcodeStr,
8504 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
8505 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
8506 Sched<[WriteBlend]>, VEX_4V;
8507 def rmi : AVX2AIi8<opc, MRMSrcMem, (outs RC:$dst),
8508 (ins RC:$src1, x86memop:$src2, i8imm:$src3),
8509 !strconcat(OpcodeStr,
8510 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
8511 [(set RC:$dst,
8512 (IntId RC:$src1,
8513 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
8514 Sched<[WriteBlendLd, ReadAfterLd]>, VEX_4V;
8515 }
8517 defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128,
8518 VR128, loadv2i64, i128mem>;
8519 defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256,
8520 VR256, loadv4i64, i256mem>, VEX_L;
8522 def : Pat<(v4i32 (X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2),
8523 imm:$mask)),
8524 (VPBLENDDrri VR128:$src1, VR128:$src2, imm:$mask)>;
8525 def : Pat<(v8i32 (X86Blendi (v8i32 VR256:$src1), (v8i32 VR256:$src2),
8526 imm:$mask)),
8527 (VPBLENDDYrri VR256:$src1, VR256:$src2, imm:$mask)>;
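
// A minimal C sketch, assuming <immintrin.h>: each immediate bit selects
// the corresponding dword, 0 from the first operand and 1 from the second.
//
//   #include <immintrin.h>
//   __m256i high_half_from_b(__m256i a, __m256i b) {
//     return _mm256_blend_epi32(a, b, 0xF0);  // VPBLENDD
//   }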
8529 //===----------------------------------------------------------------------===//
8530 // VPBROADCAST - Broadcast from memory or a 128-bit register to all
8531 // elements of the destination operand
8532 //
8533 multiclass avx2_broadcast<bits<8> opc, string OpcodeStr,
8534 X86MemOperand x86memop, PatFrag ld_frag,
8535 Intrinsic Int128, Intrinsic Int256> {
8536 def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
8537 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8538 [(set VR128:$dst, (Int128 VR128:$src))]>,
8539 Sched<[WriteShuffle]>, VEX;
8540 def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst), (ins x86memop:$src),
8541 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8542 [(set VR128:$dst,
8543 (Int128 (scalar_to_vector (ld_frag addr:$src))))]>,
8544 Sched<[WriteLoad]>, VEX;
8545 def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
8546 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8547 [(set VR256:$dst, (Int256 VR128:$src))]>,
8548 Sched<[WriteShuffle256]>, VEX, VEX_L;
8549 def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst), (ins x86memop:$src),
8550 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
8551 [(set VR256:$dst,
8552 (Int256 (scalar_to_vector (ld_frag addr:$src))))]>,
8553 Sched<[WriteLoad]>, VEX, VEX_L;
8554 }
8556 defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8,
8557 int_x86_avx2_pbroadcastb_128,
8558 int_x86_avx2_pbroadcastb_256>;
8559 defm VPBROADCASTW : avx2_broadcast<0x79, "vpbroadcastw", i16mem, loadi16,
8560 int_x86_avx2_pbroadcastw_128,
8561 int_x86_avx2_pbroadcastw_256>;
8562 defm VPBROADCASTD : avx2_broadcast<0x58, "vpbroadcastd", i32mem, loadi32,
8563 int_x86_avx2_pbroadcastd_128,
8564 int_x86_avx2_pbroadcastd_256>;
8565 defm VPBROADCASTQ : avx2_broadcast<0x59, "vpbroadcastq", i64mem, loadi64,
8566 int_x86_avx2_pbroadcastq_128,
8567 int_x86_avx2_pbroadcastq_256>;
8569 let Predicates = [HasAVX2] in {
8570 def : Pat<(v16i8 (X86VBroadcast (loadi8 addr:$src))),
8571 (VPBROADCASTBrm addr:$src)>;
8572 def : Pat<(v32i8 (X86VBroadcast (loadi8 addr:$src))),
8573 (VPBROADCASTBYrm addr:$src)>;
8574 def : Pat<(v8i16 (X86VBroadcast (loadi16 addr:$src))),
8575 (VPBROADCASTWrm addr:$src)>;
8576 def : Pat<(v16i16 (X86VBroadcast (loadi16 addr:$src))),
8577 (VPBROADCASTWYrm addr:$src)>;
8578 def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
8579 (VPBROADCASTDrm addr:$src)>;
8580 def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
8581 (VPBROADCASTDYrm addr:$src)>;
8582 def : Pat<(v2i64 (X86VBroadcast (loadi64 addr:$src))),
8583 (VPBROADCASTQrm addr:$src)>;
8584 def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
8585 (VPBROADCASTQYrm addr:$src)>;
8587 def : Pat<(v16i8 (X86VBroadcast (v16i8 VR128:$src))),
8588 (VPBROADCASTBrr VR128:$src)>;
8589 def : Pat<(v32i8 (X86VBroadcast (v16i8 VR128:$src))),
8590 (VPBROADCASTBYrr VR128:$src)>;
8591 def : Pat<(v8i16 (X86VBroadcast (v8i16 VR128:$src))),
8592 (VPBROADCASTWrr VR128:$src)>;
8593 def : Pat<(v16i16 (X86VBroadcast (v8i16 VR128:$src))),
8594 (VPBROADCASTWYrr VR128:$src)>;
8595 def : Pat<(v4i32 (X86VBroadcast (v4i32 VR128:$src))),
8596 (VPBROADCASTDrr VR128:$src)>;
8597 def : Pat<(v8i32 (X86VBroadcast (v4i32 VR128:$src))),
8598 (VPBROADCASTDYrr VR128:$src)>;
8599 def : Pat<(v2i64 (X86VBroadcast (v2i64 VR128:$src))),
8600 (VPBROADCASTQrr VR128:$src)>;
8601 def : Pat<(v4i64 (X86VBroadcast (v2i64 VR128:$src))),
8602 (VPBROADCASTQYrr VR128:$src)>;
8603 def : Pat<(v4f32 (X86VBroadcast (v4f32 VR128:$src))),
8604 (VBROADCASTSSrr VR128:$src)>;
8605 def : Pat<(v8f32 (X86VBroadcast (v4f32 VR128:$src))),
8606 (VBROADCASTSSYrr VR128:$src)>;
8607 def : Pat<(v2f64 (X86VBroadcast (v2f64 VR128:$src))),
8608 (VPBROADCASTQrr VR128:$src)>;
8609 def : Pat<(v4f64 (X86VBroadcast (v2f64 VR128:$src))),
8610 (VBROADCASTSDYrr VR128:$src)>;
8612 // Provide aliases for broadcast from the same register class that
8613 // automatically do the extract.
8614 def : Pat<(v32i8 (X86VBroadcast (v32i8 VR256:$src))),
8615 (VPBROADCASTBYrr (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src),
8616 sub_xmm)))>;
8617 def : Pat<(v16i16 (X86VBroadcast (v16i16 VR256:$src))),
8618 (VPBROADCASTWYrr (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src),
8619 sub_xmm)))>;
8620 def : Pat<(v8i32 (X86VBroadcast (v8i32 VR256:$src))),
8621 (VPBROADCASTDYrr (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src),
8622 sub_xmm)))>;
8623 def : Pat<(v4i64 (X86VBroadcast (v4i64 VR256:$src))),
8624 (VPBROADCASTQYrr (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src),
8625 sub_xmm)))>;
8626 def : Pat<(v8f32 (X86VBroadcast (v8f32 VR256:$src))),
8627 (VBROADCASTSSYrr (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src),
8628 sub_xmm)))>;
8629 def : Pat<(v4f64 (X86VBroadcast (v4f64 VR256:$src))),
8630 (VBROADCASTSDYrr (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src),
8631 sub_xmm)))>;
8633 // Provide a fallback in case the load node used in the patterns above
8634 // has additional users, which would prevent those patterns from being selected.
8635 let AddedComplexity = 20 in {
8636 def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
8637 (VBROADCASTSSrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
8638 def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
8639 (VBROADCASTSSYrr (COPY_TO_REGCLASS FR32:$src, VR128))>;
8640 def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
8641 (VBROADCASTSDYrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
8643 def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
8644 (VBROADCASTSSrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
8645 def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
8646 (VBROADCASTSSYrr (COPY_TO_REGCLASS GR32:$src, VR128))>;
8647 def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
8648 (VBROADCASTSDYrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
8650 def : Pat<(v16i8 (X86VBroadcast GR8:$src)),
8651 (VPBROADCASTBrr (COPY_TO_REGCLASS
8652 (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
8653 VR128))>;
8654 def : Pat<(v32i8 (X86VBroadcast GR8:$src)),
8655 (VPBROADCASTBYrr (COPY_TO_REGCLASS
8656 (i32 (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
8657 VR128))>;
8659 def : Pat<(v8i16 (X86VBroadcast GR16:$src)),
8660 (VPBROADCASTWrr (COPY_TO_REGCLASS
8661 (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
8662 VR128))>;
8663 def : Pat<(v16i16 (X86VBroadcast GR16:$src)),
8664 (VPBROADCASTWYrr (COPY_TO_REGCLASS
8665 (i32 (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit)),
8666 VR128))>;
8668 // The patterns for VPBROADCASTD are not needed because they would match
8669 // the exact same thing as VBROADCASTSS patterns.
8671 def : Pat<(v2i64 (X86VBroadcast GR64:$src)),
8672 (VPBROADCASTQrr (COPY_TO_REGCLASS GR64:$src, VR128))>;
8673 // The v4i64 pattern is not needed because VBROADCASTSDYrr already matches.
8674 }
8675 }
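
// A minimal C sketch, assuming <immintrin.h>: a splat from a GPR first
// moves the scalar into a VR128, matching the COPY_TO_REGCLASS fallback
// patterns above; _mm256_set1_epi32 typically lowers to VMOVD followed by
// VPBROADCASTD on AVX2 targets.
//
//   #include <immintrin.h>
//   __m256i splat_int(int x) {
//     return _mm256_set1_epi32(x);
//   }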
8677 // AVX1 broadcast patterns
8678 let Predicates = [HasAVX1Only] in {
8679 def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
8680 (VBROADCASTSSYrm addr:$src)>;
8681 def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
8682 (VBROADCASTSDYrm addr:$src)>;
8683 def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
8684 (VBROADCASTSSrm addr:$src)>;
8685 }
8687 let Predicates = [HasAVX] in {
8688 // Provide a fallback in case the load node used in the patterns above
8689 // has additional users, which would prevent those patterns from being selected.
8690 let AddedComplexity = 20 in {
8691 // 128-bit broadcasts:
8692 def : Pat<(v4f32 (X86VBroadcast FR32:$src)),
8693 (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0)>;
8694 def : Pat<(v8f32 (X86VBroadcast FR32:$src)),
8695 (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
8696 (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), sub_xmm),
8697 (VPSHUFDri (COPY_TO_REGCLASS FR32:$src, VR128), 0), 1)>;
8698 def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
8699 (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
8700 (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), sub_xmm),
8701 (VPSHUFDri (COPY_TO_REGCLASS FR64:$src, VR128), 0x44), 1)>;
8703 def : Pat<(v4i32 (X86VBroadcast GR32:$src)),
8704 (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0)>;
8705 def : Pat<(v8i32 (X86VBroadcast GR32:$src)),
8706 (VINSERTF128rr (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)),
8707 (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), sub_xmm),
8708 (VPSHUFDri (COPY_TO_REGCLASS GR32:$src, VR128), 0), 1)>;
8709 def : Pat<(v4i64 (X86VBroadcast GR64:$src)),
8710 (VINSERTF128rr (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)),
8711 (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), sub_xmm),
8712 (VPSHUFDri (COPY_TO_REGCLASS GR64:$src, VR128), 0x44), 1)>;
8713 }
8715 def : Pat<(v2f64 (X86VBroadcast f64:$src)),
8716 (VMOVDDUPrr (COPY_TO_REGCLASS FR64:$src, VR128))>;
8717 }
8719 //===----------------------------------------------------------------------===//
8720 // VPERM - Permute instructions
8721 //
8723 multiclass avx2_perm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
8724 ValueType OpVT, X86FoldableSchedWrite Sched> {
8725 def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
8726 (ins VR256:$src1, VR256:$src2),
8727 !strconcat(OpcodeStr,
8728 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8729 [(set VR256:$dst,
8730 (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>,
8731 Sched<[Sched]>, VEX_4V, VEX_L;
8732 def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
8733 (ins VR256:$src1, i256mem:$src2),
8734 !strconcat(OpcodeStr,
8735 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8736 [(set VR256:$dst,
8737 (OpVT (X86VPermv VR256:$src1,
8738 (bitconvert (mem_frag addr:$src2)))))]>,
8739 Sched<[Sched.Folded, ReadAfterLd]>, VEX_4V, VEX_L;
8740 }
8742 defm VPERMD : avx2_perm<0x36, "vpermd", loadv4i64, v8i32, WriteShuffle256>;
8743 let ExeDomain = SSEPackedSingle in
8744 defm VPERMPS : avx2_perm<0x16, "vpermps", loadv8f32, v8f32, WriteFShuffle256>;
8746 multiclass avx2_perm_imm<bits<8> opc, string OpcodeStr, PatFrag mem_frag,
8747 ValueType OpVT, X86FoldableSchedWrite Sched> {
8748 def Yri : AVX2AIi8<opc, MRMSrcReg, (outs VR256:$dst),
8749 (ins VR256:$src1, i8imm:$src2),
8750 !strconcat(OpcodeStr,
8751 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8752 [(set VR256:$dst,
8753 (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>,
8754 Sched<[Sched]>, VEX, VEX_L;
8755 def Ymi : AVX2AIi8<opc, MRMSrcMem, (outs VR256:$dst),
8756 (ins i256mem:$src1, i8imm:$src2),
8757 !strconcat(OpcodeStr,
8758 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8759 [(set VR256:$dst,
8760 (OpVT (X86VPermi (mem_frag addr:$src1),
8761 (i8 imm:$src2))))]>,
8762 Sched<[Sched.Folded, ReadAfterLd]>, VEX, VEX_L;
8763 }
8765 defm VPERMQ : avx2_perm_imm<0x00, "vpermq", loadv4i64, v4i64,
8766 WriteShuffle256>, VEX_W;
8767 let ExeDomain = SSEPackedDouble in
8768 defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", loadv4f64, v4f64,
8769 WriteFShuffle256>, VEX_W;
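
// A minimal C sketch, assuming <immintrin.h>: VPERMD takes a variable
// index vector and can cross 128-bit lanes, while VPERMQ encodes its
// permutation in the 8-bit immediate instead.
//
//   #include <immintrin.h>
//   __m256i reverse_dwords(__m256i v) {
//     const __m256i idx = _mm256_setr_epi32(7, 6, 5, 4, 3, 2, 1, 0);
//     return _mm256_permutevar8x32_epi32(v, idx);  // VPERMD
//   }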
8771 //===----------------------------------------------------------------------===//
8772 // VPERM2I128 - Permute Integer Values in 128-bit chunks
8773 //
8774 def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst),
8775 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
8776 "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8777 [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2,
8778 (i8 imm:$src3))))]>, Sched<[WriteShuffle256]>,
8779 VEX_4V, VEX_L;
8780 def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst),
8781 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
8782 "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8783 [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (loadv4i64 addr:$src2),
8784 (i8 imm:$src3)))]>,
8785 Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
8787 let Predicates = [HasAVX2] in {
8788 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8789 (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8790 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8791 (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8792 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
8793 (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>;
8795 def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (loadv4i64 addr:$src2)),
8796 (i8 imm:$imm))),
8797 (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
8798 def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1,
8799 (bc_v16i16 (loadv4i64 addr:$src2)), (i8 imm:$imm))),
8800 (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
8801 def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (loadv4i64 addr:$src2)),
8802 (i8 imm:$imm))),
8803 (VPERM2I128rm VR256:$src1, addr:$src2, imm:$imm)>;
8804 }
8807 //===----------------------------------------------------------------------===//
8808 // VINSERTI128 - Insert packed integer values
8809 //
8810 let hasSideEffects = 0 in {
8811 def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst),
8812 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
8813 "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8814 []>, Sched<[WriteShuffle256]>, VEX_4V, VEX_L;
8815 let mayLoad = 1 in
8816 def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst),
8817 (ins VR256:$src1, i128mem:$src2, i8imm:$src3),
8818 "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
8819 []>, Sched<[WriteShuffle256Ld, ReadAfterLd]>, VEX_4V, VEX_L;
8820 }
8822 let Predicates = [HasAVX2] in {
8823 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
8824 (iPTR imm)),
8825 (VINSERTI128rr VR256:$src1, VR128:$src2,
8826 (INSERT_get_vinsert128_imm VR256:$ins))>;
8827 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
8828 (iPTR imm)),
8829 (VINSERTI128rr VR256:$src1, VR128:$src2,
8830 (INSERT_get_vinsert128_imm VR256:$ins))>;
8831 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
8832 (iPTR imm)),
8833 (VINSERTI128rr VR256:$src1, VR128:$src2,
8834 (INSERT_get_vinsert128_imm VR256:$ins))>;
8835 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
8836 (iPTR imm)),
8837 (VINSERTI128rr VR256:$src1, VR128:$src2,
8838 (INSERT_get_vinsert128_imm VR256:$ins))>;
8840 def : Pat<(vinsert128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2),
8841 (iPTR imm)),
8842 (VINSERTI128rm VR256:$src1, addr:$src2,
8843 (INSERT_get_vinsert128_imm VR256:$ins))>;
8844 def : Pat<(vinsert128_insert:$ins (v8i32 VR256:$src1),
8845 (bc_v4i32 (loadv2i64 addr:$src2)),
8846 (iPTR imm)),
8847 (VINSERTI128rm VR256:$src1, addr:$src2,
8848 (INSERT_get_vinsert128_imm VR256:$ins))>;
8849 def : Pat<(vinsert128_insert:$ins (v32i8 VR256:$src1),
8850 (bc_v16i8 (loadv2i64 addr:$src2)),
8851 (iPTR imm)),
8852 (VINSERTI128rm VR256:$src1, addr:$src2,
8853 (INSERT_get_vinsert128_imm VR256:$ins))>;
8854 def : Pat<(vinsert128_insert:$ins (v16i16 VR256:$src1),
8855 (bc_v8i16 (loadv2i64 addr:$src2)),
8856 (iPTR imm)),
8857 (VINSERTI128rm VR256:$src1, addr:$src2,
8858 (INSERT_get_vinsert128_imm VR256:$ins))>;
8859 }
8861 //===----------------------------------------------------------------------===//
8862 // VEXTRACTI128 - Extract packed integer values
8863 //
8864 def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst),
8865 (ins VR256:$src1, i8imm:$src2),
8866 "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
8867 [(set VR128:$dst,
8868 (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>,
8869 Sched<[WriteShuffle256]>, VEX, VEX_L;
8870 let hasSideEffects = 0, mayStore = 1 in
8871 def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs),
8872 (ins i128mem:$dst, VR256:$src1, i8imm:$src2),
8873 "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
8874 Sched<[WriteStore]>, VEX, VEX_L;
8876 let Predicates = [HasAVX2] in {
8877 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8878 (v2i64 (VEXTRACTI128rr
8879 (v4i64 VR256:$src1),
8880 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8881 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8882 (v4i32 (VEXTRACTI128rr
8883 (v8i32 VR256:$src1),
8884 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8885 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8886 (v8i16 (VEXTRACTI128rr
8887 (v16i16 VR256:$src1),
8888 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8889 def : Pat<(vextract128_extract:$ext VR256:$src1, (iPTR imm)),
8890 (v16i8 (VEXTRACTI128rr
8891 (v32i8 VR256:$src1),
8892 (EXTRACT_get_vextract128_imm VR128:$ext)))>;
8894 def : Pat<(store (v2i64 (vextract128_extract:$ext (v4i64 VR256:$src1),
8895 (iPTR imm))), addr:$dst),
8896 (VEXTRACTI128mr addr:$dst, VR256:$src1,
8897 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8898 def : Pat<(store (v4i32 (vextract128_extract:$ext (v8i32 VR256:$src1),
8899 (iPTR imm))), addr:$dst),
8900 (VEXTRACTI128mr addr:$dst, VR256:$src1,
8901 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8902 def : Pat<(store (v8i16 (vextract128_extract:$ext (v16i16 VR256:$src1),
8903 (iPTR imm))), addr:$dst),
8904 (VEXTRACTI128mr addr:$dst, VR256:$src1,
8905 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8906 def : Pat<(store (v16i8 (vextract128_extract:$ext (v32i8 VR256:$src1),
8907 (iPTR imm))), addr:$dst),
8908 (VEXTRACTI128mr addr:$dst, VR256:$src1,
8909 (EXTRACT_get_vextract128_imm VR128:$ext))>;
8910 }
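
// A minimal C sketch, assuming <immintrin.h>; these are the integer
// counterparts of the VINSERTF128/VEXTRACTF128 forms above.
//
//   #include <immintrin.h>
//   __m128i high_i128(__m256i v) {
//     return _mm256_extracti128_si256(v, 1);  // VEXTRACTI128
//   }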
8912 //===----------------------------------------------------------------------===//
8913 // VPMASKMOV - Conditional SIMD Integer Packed Loads and Stores
8914 //
8915 multiclass avx2_pmovmask<string OpcodeStr,
8916 Intrinsic IntLd128, Intrinsic IntLd256,
8917 Intrinsic IntSt128, Intrinsic IntSt256> {
8918 def rm : AVX28I<0x8c, MRMSrcMem, (outs VR128:$dst),
8919 (ins VR128:$src1, i128mem:$src2),
8920 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8921 [(set VR128:$dst, (IntLd128 addr:$src2, VR128:$src1))]>, VEX_4V;
8922 def Yrm : AVX28I<0x8c, MRMSrcMem, (outs VR256:$dst),
8923 (ins VR256:$src1, i256mem:$src2),
8924 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8925 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
8926 VEX_4V, VEX_L;
8927 def mr : AVX28I<0x8e, MRMDestMem, (outs),
8928 (ins i128mem:$dst, VR128:$src1, VR128:$src2),
8929 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8930 [(IntSt128 addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
8931 def Ymr : AVX28I<0x8e, MRMDestMem, (outs),
8932 (ins i256mem:$dst, VR256:$src1, VR256:$src2),
8933 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
8934 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L;
8935 }
8937 defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd",
8938 int_x86_avx2_maskload_d,
8939 int_x86_avx2_maskload_d_256,
8940 int_x86_avx2_maskstore_d,
8941 int_x86_avx2_maskstore_d_256>;
8942 defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
8943 int_x86_avx2_maskload_q,
8944 int_x86_avx2_maskload_q_256,
8945 int_x86_avx2_maskstore_q,
8946 int_x86_avx2_maskstore_q_256>, VEX_W;
8948 def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src)),
8949 (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
8951 def: Pat<(masked_store addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src)),
8952 (VPMASKMOVDYmr addr:$ptr, VR256:$mask, VR256:$src)>;
8954 def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
8955 (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
8957 def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask),
8958 (bc_v8f32 (v8i32 immAllZerosV)))),
8959 (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
8961 def: Pat<(v8f32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8f32 VR256:$src0))),
8962 (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
8963 VR256:$mask)>;
8965 def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), undef)),
8966 (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
8968 def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 immAllZerosV))),
8969 (VPMASKMOVDYrm VR256:$mask, addr:$ptr)>;
8971 def: Pat<(v8i32 (masked_load addr:$ptr, (v8i32 VR256:$mask), (v8i32 VR256:$src0))),
8972 (VBLENDVPSYrr VR256:$src0, (VPMASKMOVDYrm VR256:$mask, addr:$ptr),
8973 VR256:$mask)>;
8975 def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src)),
8976 (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;
8978 def: Pat<(masked_store addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src)),
8979 (VPMASKMOVQYmr addr:$ptr, VR256:$mask, VR256:$src)>;
8981 def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
8982 (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
8984 def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
8985 (v4f64 immAllZerosV))),
8986 (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
8988 def: Pat<(v4f64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4f64 VR256:$src0))),
8989 (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
8990 VR256:$mask)>;
8992 def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), undef)),
8993 (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
8995 def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask),
8996 (bc_v4i64 (v8i32 immAllZerosV)))),
8997 (VPMASKMOVQYrm VR256:$mask, addr:$ptr)>;
8999 def: Pat<(v4i64 (masked_load addr:$ptr, (v4i64 VR256:$mask), (v4i64 VR256:$src0))),
9000 (VBLENDVPDYrr VR256:$src0, (VPMASKMOVQYrm VR256:$mask, addr:$ptr),
9001 VR256:$mask)>;
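
// A minimal C sketch, assuming <immintrin.h> and a mask whose lanes are
// all-ones or all-zeros: a masked load with a nonzero pass-through value
// has no single instruction, which is why the patterns above pair the
// maskmov load with a blend.
//
//   #include <immintrin.h>
//   __m256i masked_load_with_default(const int *p, __m256i mask,
//                                    __m256i def) {
//     __m256i loaded = _mm256_maskload_epi32(p, mask);  // VPMASKMOVD
//     return _mm256_blendv_epi8(def, loaded, mask);     // keep def lanes
//   }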
9004 //===----------------------------------------------------------------------===//
9005 // Variable Bit Shifts
9006 //
9007 multiclass avx2_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
9008 ValueType vt128, ValueType vt256> {
9009 def rr : AVX28I<opc, MRMSrcReg, (outs VR128:$dst),
9010 (ins VR128:$src1, VR128:$src2),
9011 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
9012 [(set VR128:$dst,
9013 (vt128 (OpNode VR128:$src1, (vt128 VR128:$src2))))]>,
9014 VEX_4V, Sched<[WriteVarVecShift]>;
9015 def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst),
9016 (ins VR128:$src1, i128mem:$src2),
9017 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
9018 [(set VR128:$dst,
9019 (vt128 (OpNode VR128:$src1,
9020 (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
9021 VEX_4V, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
9022 def Yrr : AVX28I<opc, MRMSrcReg, (outs VR256:$dst),
9023 (ins VR256:$src1, VR256:$src2),
9024 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
9025 [(set VR256:$dst,
9026 (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>,
9027 VEX_4V, VEX_L, Sched<[WriteVarVecShift]>;
9028 def Yrm : AVX28I<opc, MRMSrcMem, (outs VR256:$dst),
9029 (ins VR256:$src1, i256mem:$src2),
9030 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
9031 [(set VR256:$dst,
9032 (vt256 (OpNode VR256:$src1,
9033 (vt256 (bitconvert (loadv4i64 addr:$src2))))))]>,
9034 VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
9035 }
9037 defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
9038 defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
9039 defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
9040 defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
9041 defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
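
// A minimal C sketch, assuming <immintrin.h>: each element shifts by its
// own count; counts of 32 or more yield 0 for vpsllvd/vpsrlvd (and all
// sign bits for vpsravd), unlike the scalar shift instructions.
//
//   #include <immintrin.h>
//   __m256i shift_each(__m256i v, __m256i counts) {
//     return _mm256_sllv_epi32(v, counts);  // VPSLLVD
//   }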
9043 //===----------------------------------------------------------------------===//
9044 // VGATHER - GATHER Operations
9045 multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
9046 X86MemOperand memop128, X86MemOperand memop256> {
9047 def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
9048 (ins VR128:$src1, memop128:$src2, VR128:$mask),
9049 !strconcat(OpcodeStr,
9050 "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
9051 []>, VEX_4VOp3;
9052 def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
9053 (ins RC256:$src1, memop256:$src2, RC256:$mask),
9054 !strconcat(OpcodeStr,
9055 "\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
9056 []>, VEX_4VOp3, VEX_L;
9057 }
9059 let mayLoad = 1, Constraints
9060 = "@earlyclobber $dst, @earlyclobber $mask_wb, $src1 = $dst, $mask = $mask_wb"
9061 in {
9062 defm VPGATHERDQ : avx2_gather<0x90, "vpgatherdq", VR256, vx64mem, vx64mem>, VEX_W;
9063 defm VPGATHERQQ : avx2_gather<0x91, "vpgatherqq", VR256, vx64mem, vy64mem>, VEX_W;
9064 defm VPGATHERDD : avx2_gather<0x90, "vpgatherdd", VR256, vx32mem, vy32mem>;
9065 defm VPGATHERQD : avx2_gather<0x91, "vpgatherqd", VR128, vx32mem, vy32mem>;
9067 let ExeDomain = SSEPackedDouble in {
9068 defm VGATHERDPD : avx2_gather<0x92, "vgatherdpd", VR256, vx64mem, vx64mem>, VEX_W;
9069 defm VGATHERQPD : avx2_gather<0x93, "vgatherqpd", VR256, vx64mem, vy64mem>, VEX_W;
9070 }
9072 let ExeDomain = SSEPackedSingle in {
9073 defm VGATHERDPS : avx2_gather<0x92, "vgatherdps", VR256, vx32mem, vy32mem>;
9074 defm VGATHERQPS : avx2_gather<0x93, "vgatherqps", VR128, vx32mem, vy32mem>;
9075 }
9076 }
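
// A minimal C sketch, assuming <immintrin.h>: elements whose mask sign bit
// is clear keep their value from src, and the hardware clears mask
// elements as loads complete, which is why $mask_wb above is modeled as an
// early-clobber output tied to $mask.
//
//   #include <immintrin.h>
//   __m256i gather_masked(const int *base, __m256i idx,
//                         __m256i mask, __m256i src) {
//     return _mm256_mask_i32gather_epi32(src, base, idx, mask, 4);  // scale 4
//   }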