; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX

declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
declare <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8>, <16 x i8>) nounwind readnone
; pmaddwd with a zeroinitializer second operand: the result is mathematically
; all-zero, but the autogenerated checks below show the pxor + pmaddwd pair is
; still emitted — i.e. the fold to zero does not fire here.
; NOTE(review): if that combine lands, regenerate with update_llc_test_checks.py.
define <4 x i32> @combine_pmaddwd_zero(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: combine_pmaddwd_zero:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pmaddwd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pmaddwd_zero:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpmaddwd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a0, <8 x i16> zeroinitializer)
  ret <4 x i32> %1
}
| 24 | + |
; Commuted form of the test above: zeroinitializer as the FIRST operand.
; The checks show codegen still emits the multiply-add (AVX keeps the operand
; order, vpmaddwd %xmm0, %xmm1), so the known-zero fold is not applied here
; either. NOTE(review): regenerate checks if the fold is implemented.
define <4 x i32> @combine_pmaddwd_zero_commute(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: combine_pmaddwd_zero_commute:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pmaddwd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pmaddwd_zero_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpmaddwd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> zeroinitializer, <8 x i16> %a0)
  ret <4 x i32> %1
}
| 40 | + |
; pmaddubsw with a zeroinitializer second (signed) operand: every product is
; zero, so the result is known zero, yet the checks show pxor + pmaddubsw is
; still generated — the fold does not fire.
; NOTE(review): regenerate with update_llc_test_checks.py if the combine lands.
define <8 x i16> @combine_pmaddubsw_zero(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pmaddubsw_zero:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pmaddubsw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pmaddubsw_zero:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpmaddubsw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> %a0, <16 x i8> zeroinitializer)
  ret <8 x i16> %1
}
| 56 | + |
; Commuted form: zeroinitializer as the FIRST (unsigned) operand. Unlike
; pmaddwd, pmaddubsw is not commutative (operands have different signedness),
; so the SSE sequence needs an extra movdqa to put the result back in %xmm0.
; The checks show the known-zero fold does not fire here either.
; NOTE(review): regenerate with update_llc_test_checks.py if the combine lands.
define <8 x i16> @combine_pmaddubsw_zero_commute(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: combine_pmaddubsw_zero_commute:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pmaddubsw %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_pmaddubsw_zero_commute:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpmaddubsw %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = call <8 x i16> @llvm.x86.ssse3.pmadd.ub.sw.128(<16 x i8> zeroinitializer, <16 x i8> %a0)
  ret <8 x i16> %1
}