diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 6529f1386599c..6bee953b378e8 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2521,6 +2521,32 @@ def ComplexImagOp : CIR_Op<"complex.imag", [Pure]> {
   let hasFolder = 1;
 }
 
+//===----------------------------------------------------------------------===//
+// ComplexAddOp
+//===----------------------------------------------------------------------===//
+
+def ComplexAddOp : CIR_Op<"complex.add", [Pure, SameOperandsAndResultType]> {
+  let summary = "Complex addition";
+  let description = [{
+    The `cir.complex.add` operation takes two complex numbers and returns
+    their sum.
+
+    Example:
+
+    ```mlir
+    %2 = cir.complex.add %0, %1 : !cir.complex<!cir.float>
+    ```
+  }];
+
+  let arguments = (ins CIR_ComplexType:$lhs, CIR_ComplexType:$rhs);
+
+  let results = (outs CIR_ComplexType:$result);
+
+  let assemblyFormat = [{
+    $lhs `,` $rhs `:` qualified(type($result)) attr-dict
+  }];
+}
+
 //===----------------------------------------------------------------------===//
 // Bit Manipulation Operations
 //===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 84fad959ebf49..24a9bf13e1880 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -57,6 +57,55 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   mlir::Value
   VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e);
   mlir::Value VisitUnaryDeref(const Expr *e);
+
+  struct BinOpInfo {
+    mlir::Location loc;
+    mlir::Value lhs{};
+    mlir::Value rhs{};
+    QualType ty{}; // Computation Type.
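+    // FP options captured from the source expression; for now the arithmetic
+    // emitters only assert on the still-missing fast-math / FP-pragma
+    // handling (see emitBinAdd below).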
+    FPOptions fpFeatures{};
+  };
+
+  BinOpInfo emitBinOps(const BinaryOperator *e,
+                       QualType promotionTy = QualType());
+
+  mlir::Value emitPromoted(const Expr *e, QualType promotionTy);
+
+  mlir::Value emitPromotedComplexOperand(const Expr *e, QualType promotionTy);
+
+  mlir::Value emitBinAdd(const BinOpInfo &op);
+
+  QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
+    if (auto *complexTy = ty->getAs<ComplexType>()) {
+      QualType elementTy = complexTy->getElementType();
+      if (isDivOpCode && elementTy->isFloatingType() &&
+          cgf.getLangOpts().getComplexRange() ==
+              LangOptions::ComplexRangeKind::CX_Promoted) {
+        cgf.cgm.errorNYI("HigherPrecisionTypeForComplexArithmetic");
+        return QualType();
+      }
+
+      if (elementTy.UseExcessPrecision(cgf.getContext()))
+        return cgf.getContext().getComplexType(cgf.getContext().FloatTy);
+    }
+
+    if (ty.UseExcessPrecision(cgf.getContext()))
+      return cgf.getContext().FloatTy;
+    return QualType();
+  }
+
+#define HANDLEBINOP(OP)                                                        \
+  mlir::Value VisitBin##OP(const BinaryOperator *e) {                          \
+    QualType promotionTy = getPromotionType(                                   \
+        e->getType(), e->getOpcode() == BinaryOperatorKind::BO_Div);           \
+    mlir::Value result = emitBin##OP(emitBinOps(e, promotionTy));              \
+    if (!promotionTy.isNull())                                                 \
+      cgf.cgm.errorNYI("Binop emitUnPromotedValue");                           \
+    return result;                                                             \
+  }
+
+  HANDLEBINOP(Add)
+#undef HANDLEBINOP
 };
 
 } // namespace
 
@@ -291,6 +340,60 @@ mlir::Value ComplexExprEmitter::VisitUnaryDeref(const Expr *e) {
   return emitLoadOfLValue(e);
 }
 
+mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
+                                             QualType promotionTy) {
+  e = e->IgnoreParens();
+  if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
+    switch (bo->getOpcode()) {
+#define HANDLE_BINOP(OP)                                                       \
+  case BO_##OP:                                                                \
+    return emitBin##OP(emitBinOps(bo, promotionTy));
+      HANDLE_BINOP(Add)
+#undef HANDLE_BINOP
+    default:
+      break;
+    }
+  } else if (isa<UnaryOperator>(e)) {
+    cgf.cgm.errorNYI("emitPromoted UnaryOperator");
+    return {};
+  }
+
+  mlir::Value result = Visit(const_cast<Expr *>(e));
+  if (!promotionTy.isNull())
+    cgf.cgm.errorNYI("emitPromoted emitPromotedValue");
+
+  return result;
+}
+
+mlir::Value
+ComplexExprEmitter::emitPromotedComplexOperand(const Expr *e,
+                                               QualType promotionTy) {
+  if (e->getType()->isAnyComplexType()) {
+    if (!promotionTy.isNull())
+      return cgf.emitPromotedComplexExpr(e, promotionTy);
+    return Visit(const_cast<Expr *>(e));
+  }
+
+  cgf.cgm.errorNYI("emitPromotedComplexOperand non-complex type");
+  return {};
+}
+
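+// Evaluates both operands of a binary expression on complex values, promoting
+// them to the requested computation type when one is given, and records the
+// computation type and FP options that the emitBin* helpers consume.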
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::emitBinOps(const BinaryOperator *e, QualType promotionTy) {
+  BinOpInfo binOpInfo{cgf.getLoc(e->getExprLoc())};
+  binOpInfo.lhs = emitPromotedComplexOperand(e->getLHS(), promotionTy);
+  binOpInfo.rhs = emitPromotedComplexOperand(e->getRHS(), promotionTy);
+  binOpInfo.ty = promotionTy.isNull() ? e->getType() : promotionTy;
+  binOpInfo.fpFeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
+  return binOpInfo;
+}
+
+mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
+  assert(!cir::MissingFeatures::fastMathFlags());
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
+  return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
+}
+
 LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
   assert(e->getOpcode() == BO_Assign && "Expected assign op");
@@ -313,3 +416,8 @@ void CIRGenFunction::emitStoreOfComplex(mlir::Location loc, mlir::Value v,
                                         LValue dest, bool isInit) {
   ComplexExprEmitter(*this).emitStoreOfComplex(loc, v, dest, isInit);
 }
+
+mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *e,
+                                                    QualType promotionType) {
+  return ComplexExprEmitter(*this).emitPromoted(e, promotionType);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 76353bae68e21..f9a4c3f7bbab5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -886,6 +886,8 @@ class CIRGenFunction : public CIRGenTypeCache {
   void emitInitializerForField(clang::FieldDecl *field, LValue lhs,
                                clang::Expr *init);
 
+  mlir::Value emitPromotedComplexExpr(const Expr *e, QualType promotionType);
+
   mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
 
   /// Emit the computation of the specified expression of scalar type.
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index af307f6ad673d..cbbe7a2380268 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2048,6 +2048,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
                CIRToLLVMBrOpLowering,
                CIRToLLVMCallOpLowering,
                CIRToLLVMCmpOpLowering,
+               CIRToLLVMComplexAddOpLowering,
                CIRToLLVMComplexCreateOpLowering,
                CIRToLLVMComplexImagOpLowering,
                CIRToLLVMComplexRealOpLowering,
@@ -2357,6 +2358,55 @@ mlir::LogicalResult CIRToLLVMVecTernaryOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMComplexAddOpLowering::matchAndRewrite(
+    cir::ComplexAddOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  mlir::Value lhs = adaptor.getLhs();
+  mlir::Value rhs = adaptor.getRhs();
+  mlir::Location loc = op.getLoc();
+
+  auto complexType = mlir::cast<cir::ComplexType>(op.getLhs().getType());
+  mlir::Type complexElemTy =
+      getTypeConverter()->convertType(complexType.getElementType());
+  auto lhsReal =
+      rewriter.create<mlir::LLVM::ExtractValueOp>(loc, complexElemTy, lhs, 0);
+  auto lhsImag =
+      rewriter.create<mlir::LLVM::ExtractValueOp>(loc, complexElemTy, lhs, 1);
+  auto rhsReal =
+      rewriter.create<mlir::LLVM::ExtractValueOp>(loc, complexElemTy, rhs, 0);
+  auto rhsImag =
+      rewriter.create<mlir::LLVM::ExtractValueOp>(loc, complexElemTy, rhs, 1);
+
+  mlir::Value newReal;
+  mlir::Value newImag;
+  if (complexElemTy.isInteger()) {
+    newReal = rewriter.create<mlir::LLVM::AddOp>(loc, complexElemTy, lhsReal,
+                                                 rhsReal);
+    newImag = rewriter.create<mlir::LLVM::AddOp>(loc, complexElemTy, lhsImag,
+                                                 rhsImag);
+  } else {
+    assert(!cir::MissingFeatures::fastMathFlags());
+    assert(!cir::MissingFeatures::fpConstraints());
+    newReal = rewriter.create<mlir::LLVM::FAddOp>(loc, complexElemTy, lhsReal,
+                                                  rhsReal);
+    newImag = rewriter.create<mlir::LLVM::FAddOp>(loc, complexElemTy, lhsImag,
+                                                  rhsImag);
+  }
+
+  mlir::Type complexLLVMTy =
+      getTypeConverter()->convertType(op.getResult().getType());
+  auto initialComplex =
+      rewriter.create<mlir::LLVM::PoisonOp>(op->getLoc(), complexLLVMTy);
+
+  auto realComplex = rewriter.create<mlir::LLVM::InsertValueOp>(
+      op->getLoc(), initialComplex, newReal, 0);
+
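+  // The second insertvalue fills in the imaginary element, completing the
+  // { real, imag } struct that replaces the original cir.complex.add.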
+  rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(op, realComplex,
+                                                          newImag, 1);
+
+  return mlir::success();
+}
+
 mlir::LogicalResult CIRToLLVMComplexCreateOpLowering::matchAndRewrite(
     cir::ComplexCreateOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index d9fb91066317b..a41cf3f50812e 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -523,6 +523,16 @@ class CIRToLLVMGetBitfieldOpLowering
       mlir::ConversionPatternRewriter &) const override;
 };
 
+class CIRToLLVMComplexAddOpLowering
+    : public mlir::OpConversionPattern<cir::ComplexAddOp> {
+public:
+  using mlir::OpConversionPattern<cir::ComplexAddOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::ComplexAddOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 } // namespace direct
 } // namespace cir
 
diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.cpp b/clang/test/CIR/CodeGen/complex-arithmetic.cpp
new file mode 100644
index 0000000000000..5131c075744c8
--- /dev/null
+++ b/clang/test/CIR/CodeGen/complex-arithmetic.cpp
@@ -0,0 +1,160 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG
+
+void foo() {
+  int _Complex a;
+  int _Complex b;
+  int _Complex c = a + b;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM: %[[ADD_REAL:.*]] = add i32 %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[ADD_IMAG:.*]] = add i32 %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0
+// LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[ADD_IMAG]], 1
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = add i32 %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[ADD_IMAG:.*]] = add i32 %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store i32 %[[ADD_REAL]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store i32 %[[ADD_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
+void foo2() {
+  float _Complex a;
+  float _Complex b;
+  float _Complex c = a + b;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
+// LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[ADD_IMAG]], 1
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store float %[[ADD_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
+void foo3() {
+  float _Complex a;
+  float _Complex b;
+  float _Complex c;
+  float _Complex d = (a + b) + c;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[ADD_A_B_C:.*]] = cir.complex.add %[[ADD_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[ADD_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[ADD_REAL_A_B:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B]], 0
+// LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[ADD_IMAG_A_B]], 1
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
+// LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
+// LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
+// LLVM: %[[C_IMAG:.*]] = extractvalue { float, float } %[[TMP_C]], 1
+// LLVM: %[[ADD_REAL_A_B_C:.*]] = fadd float %[[A_B_REAL]], %[[C_REAL]]
+// LLVM: %[[ADD_IMAG_A_B_C:.*]] = fadd float %[[A_B_IMAG]], %[[C_IMAG]]
+// LLVM: %[[A_B_C:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B_C]], 0
+// LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[ADD_IMAG_A_B_C]], 1
+// LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL_A_B:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL_A_B_C:.*]] = fadd float %[[ADD_REAL_A_B]], %[[C_REAL]]
+// OGCG: %[[ADD_IMAG_A_B_C:.*]] = fadd float %[[ADD_IMAG_A_B]], %[[C_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL_A_B_C]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store float %[[ADD_IMAG_A_B_C]], ptr %[[RESULT_IMAG_PTR]], align 4
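For reference, the shape these tests pin down is roughly the following hand-written sketch (illustrative SSA names, assuming a `float _Complex` source as in `foo2`; not copied from compiler output):

```mlir
// CIRGen: both operands are loaded as whole complex values and added by the
// new op; the result keeps the same complex type.
%a = cir.load %a_addr : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
%b = cir.load %b_addr : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
%sum = cir.complex.add %a, %b : !cir.complex<!cir.float>

// LLVM lowering then extracts the two elements of each operand, adds them
// pairwise (fadd for floating-point element types, add for integers), and
// rebuilds the { elem, elem } result struct from poison with two insertvalues.
```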