; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=ve -mattr=+vpu | FileCheck %s

declare <256 x float> @llvm.vp.merge.v256f32(<256 x i1>, <256 x float>, <256 x float>, i32)
declare <256 x float> @llvm.vp.fadd.v256f32(<256 x float>, <256 x float>, <256 x i1>, i32)
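
; A vp.fadd whose result feeds vp.merge with the same mask %m and AVL %n
; should fold into a single masked pvfadd.up that writes the sum into the
; passthru register.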
define fastcc <256 x float> @test_vp_fadd_v256f32_vv_merge(<256 x float> %passthru, <256 x float> %i0, <256 x float> %i1, <256 x i1> %m, i32 %n) {
; CHECK-LABEL: test_vp_fadd_v256f32_vv_merge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    pvfadd.up %v0, %v1, %v2, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %vr = call <256 x float> @llvm.vp.fadd.v256f32(<256 x float> %i0, <256 x float> %i1, <256 x i1> %m, i32 %n)
  %r0 = call <256 x float> @llvm.vp.merge.v256f32(<256 x i1> %m, <256 x float> %vr, <256 x float> %passthru, i32 %n)
  ret <256 x float> %r0
}
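
; Same fold with the left operand splatted from a scalar; the splat should be
; matched as the scalar-register form of pvfadd.up.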
define fastcc <256 x float> @test_vp_fadd_v256f32_rv_merge(<256 x float> %passthru, float %s0, <256 x float> %i1, <256 x i1> %m, i32 %n) {
; CHECK-LABEL: test_vp_fadd_v256f32_rv_merge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfadd.up %v0, %s0, %v1, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %xins = insertelement <256 x float> undef, float %s0, i32 0
  %i0 = shufflevector <256 x float> %xins, <256 x float> undef, <256 x i32> zeroinitializer
  %vr = call <256 x float> @llvm.vp.fadd.v256f32(<256 x float> %i0, <256 x float> %i1, <256 x i1> %m, i32 %n)
  %r0 = call <256 x float> @llvm.vp.merge.v256f32(<256 x i1> %m, <256 x float> %vr, <256 x float> %passthru, i32 %n)
  ret <256 x float> %r0
}
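
; Scalar splat on the right-hand side; fadd is commutative, so the operands
; should be swapped to still use the scalar-register form.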
define fastcc <256 x float> @test_vp_fadd_v256f32_vr_merge(<256 x float> %passthru, <256 x float> %i0, float %s1, <256 x i1> %m, i32 %n) {
; CHECK-LABEL: test_vp_fadd_v256f32_vr_merge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    pvfadd.up %v0, %s0, %v1, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %yins = insertelement <256 x float> undef, float %s1, i32 0
  %i1 = shufflevector <256 x float> %yins, <256 x float> undef, <256 x i32> zeroinitializer
  %vr = call <256 x float> @llvm.vp.fadd.v256f32(<256 x float> %i0, <256 x float> %i1, <256 x i1> %m, i32 %n)
  %r0 = call <256 x float> @llvm.vp.merge.v256f32(<256 x i1> %m, <256 x float> %vr, <256 x float> %passthru, i32 %n)
  ret <256 x float> %r0
}


declare <256 x double> @llvm.vp.merge.v256f64(<256 x i1>, <256 x double>, <256 x double>, i32)
declare <256 x double> @llvm.vp.fadd.v256f64(<256 x double>, <256 x double>, <256 x i1>, i32)
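
; The same vp.fadd + vp.merge fold for f64 elements, which should select the
; masked vfadd.d.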
define fastcc <256 x double> @test_vp_fadd_v256f64_vv_merge(<256 x double> %passthru, <256 x double> %i0, <256 x double> %i1, <256 x i1> %m, i32 %n) {
; CHECK-LABEL: test_vp_fadd_v256f64_vv_merge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s0, %s0, (32)0
; CHECK-NEXT:    lvl %s0
; CHECK-NEXT:    vfadd.d %v0, %v1, %v2, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %vr = call <256 x double> @llvm.vp.fadd.v256f64(<256 x double> %i0, <256 x double> %i1, <256 x i1> %m, i32 %n)
  %r0 = call <256 x double> @llvm.vp.merge.v256f64(<256 x i1> %m, <256 x double> %vr, <256 x double> %passthru, i32 %n)
  ret <256 x double> %r0
}
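
; f64 variant with the left operand splatted from a scalar register.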
define fastcc <256 x double> @test_vp_fadd_v256f64_rv_merge(<256 x double> %passthru, double %s0, <256 x double> %i1, <256 x i1> %m, i32 %n) {
; CHECK-LABEL: test_vp_fadd_v256f64_rv_merge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfadd.d %v0, %s0, %v1, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %xins = insertelement <256 x double> undef, double %s0, i32 0
  %i0 = shufflevector <256 x double> %xins, <256 x double> undef, <256 x i32> zeroinitializer
  %vr = call <256 x double> @llvm.vp.fadd.v256f64(<256 x double> %i0, <256 x double> %i1, <256 x i1> %m, i32 %n)
  %r0 = call <256 x double> @llvm.vp.merge.v256f64(<256 x i1> %m, <256 x double> %vr, <256 x double> %passthru, i32 %n)
  ret <256 x double> %r0
}
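
; f64 variant with the scalar splat on the right; expect the commuted
; scalar-register form of vfadd.d.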
define fastcc <256 x double> @test_vp_fadd_v256f64_vr_merge(<256 x double> %passthru, <256 x double> %i0, double %s1, <256 x i1> %m, i32 %n) {
; CHECK-LABEL: test_vp_fadd_v256f64_vr_merge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vfadd.d %v0, %s0, %v1, %vm1
; CHECK-NEXT:    b.l.t (, %s10)
  %yins = insertelement <256 x double> undef, double %s1, i32 0
  %i1 = shufflevector <256 x double> %yins, <256 x double> undef, <256 x i32> zeroinitializer
  %vr = call <256 x double> @llvm.vp.fadd.v256f64(<256 x double> %i0, <256 x double> %i1, <256 x i1> %m, i32 %n)
  %r0 = call <256 x double> @llvm.vp.merge.v256f64(<256 x i1> %m, <256 x double> %vr, <256 x double> %passthru, i32 %n)
  ret <256 x double> %r0
}