Commit 889ac87

[mlir][Vector] Remove usage of vector.insertelement/extractelement from Vector (#144413)
This PR is part of the last step to remove the `vector.extractelement` and `vector.insertelement` ops (RFC: https://discourse.llvm.org/t/rfc-psa-remove-vector-extractelement-and-vector-insertelement-ops-in-favor-of-vector-extract-and-vector-insert-ops). It removes the remaining instances of `vector.extractelement` and `vector.insertelement` from the Vector dialect layer.
1 parent 61d52ea commit 889ac87
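
For reference, a minimal sketch of the syntax migration (the operands %v and %s and the dynamic index %idx are hypothetical; the deprecated ops only handled 0-D/1-D vectors, while their replacements also accept dynamic positions):

  // Before: deprecated ops slated for removal.
  %e0 = vector.extractelement %v[%idx : index] : vector<4xf32>
  %v0 = vector.insertelement %s, %v[%idx : index] : vector<4xf32>
  // After: vector.extract / vector.insert with a dynamic position.
  %e1 = vector.extract %v[%idx] : f32 from vector<4xf32>
  %v1 = vector.insert %s, %v[%idx] : f32 into vector<4xf32>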

7 files changed: +72, -185 lines


mlir/include/mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h

Lines changed: 2 additions & 2 deletions

@@ -235,8 +235,8 @@ void populateBreakDownVectorReductionPatterns(
 ///
 /// [DecomposeNDExtractStridedSlice]
 /// ================================
-/// For such cases, we can rewrite it to ExtractOp/ExtractElementOp + lower
-/// rank ExtractStridedSliceOp + InsertOp/InsertElementOp for the n-D case.
+/// For such cases, we can rewrite it to ExtractOp + lower rank
+/// ExtractStridedSliceOp + InsertOp for the n-D case.
 void populateVectorInsertExtractStridedSliceDecompositionPatterns(
     RewritePatternSet &patterns, PatternBenefit benefit = 1);
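
As a rough illustration of the decomposition described in the updated comment (shapes and operand names are hypothetical, and the exact IR the pattern emits may differ):

  // Input: a 2-D strided-slice extraction.
  %s = vector.extract_strided_slice %v
      {offsets = [1, 2], sizes = [1, 4], strides = [1, 1]}
      : vector<4x8xf32> to vector<1x4xf32>
  // Rewritten (sketch): ExtractOp + lower-rank ExtractStridedSliceOp + InsertOp.
  %init = arith.constant dense<0.0> : vector<1x4xf32>
  %row = vector.extract %v[1] : vector<8xf32> from vector<4x8xf32>
  %sub = vector.extract_strided_slice %row
      {offsets = [2], sizes = [4], strides = [1]}
      : vector<8xf32> to vector<4xf32>
  %s2 = vector.insert %sub, %init [0] : vector<4xf32> into vector<1x4xf32>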

mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp

Lines changed: 5 additions & 8 deletions

@@ -154,7 +154,7 @@ static Value generateMaskCheck(OpBuilder &b, OpTy xferOp, Value iv) {
     return Value();
 
   Location loc = xferOp.getLoc();
-  return b.create<vector::ExtractElementOp>(loc, xferOp.getMask(), iv);
+  return b.create<vector::ExtractOp>(loc, xferOp.getMask(), iv);
 }
 
 /// Helper function TransferOpConversion and TransferOp1dConversion.

@@ -757,8 +757,7 @@ struct DecomposePrintOpConversion : public VectorToSCFPattern<vector::PrintOp> {
 
     if (vectorType.getRank() != 1) {
       // Flatten n-D vectors to 1D. This is done to allow indexing with a
-      // non-constant value (which can currently only be done via
-      // vector.extractelement for 1D vectors).
+      // non-constant value.
       auto flatLength = std::accumulate(shape.begin(), shape.end(), 1,
                                         std::multiplies<int64_t>());
       auto flatVectorType =

@@ -821,8 +820,7 @@ struct DecomposePrintOpConversion : public VectorToSCFPattern<vector::PrintOp> {
     }
 
     // Print the scalar elements in the inner most loop.
-    auto element =
-        rewriter.create<vector::ExtractElementOp>(loc, value, flatIndex);
+    auto element = rewriter.create<vector::ExtractOp>(loc, value, flatIndex);
     rewriter.create<vector::PrintOp>(loc, element,
                                      vector::PrintPunctuation::NoPunctuation);

@@ -1575,7 +1573,7 @@ struct Strategy1d<TransferReadOp> {
         /*inBoundsCase=*/
         [&](OpBuilder &b, Location loc) {
           Value val = b.create<memref::LoadOp>(loc, xferOp.getBase(), indices);
-          return b.create<vector::InsertElementOp>(loc, val, vec, iv);
+          return b.create<vector::InsertOp>(loc, val, vec, iv);
         },
         /*outOfBoundsCase=*/
         [&](OpBuilder & /*b*/, Location loc) { return vec; });

@@ -1603,8 +1601,7 @@ struct Strategy1d<TransferWriteOp> {
    generateInBoundsCheck(
        b, xferOp, iv, dim,
        /*inBoundsCase=*/[&](OpBuilder &b, Location loc) {
-          auto val =
-              b.create<vector::ExtractElementOp>(loc, xferOp.getVector(), iv);
+          auto val = b.create<vector::ExtractOp>(loc, xferOp.getVector(), iv);
          b.create<memref::StoreOp>(loc, val, xferOp.getBase(), indices);
        });
    b.create<scf::YieldOp>(loc);
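
With these changes, the 1-D transfer lowering builds its loop body from vector.extract/vector.insert with the induction variable as a dynamic position. A hedged sketch of what the read lowering now produces, omitting the in-bounds check that the real code wraps around each access (value names and the memref shape are hypothetical):

  %read = scf.for %iv = %c0 to %c5 step %c1 iter_args(%acc = %init) -> (vector<5xf32>) {
    %val = memref.load %mem[%iv] : memref<5xf32>
    %cur = vector.insert %val, %acc[%iv] : f32 into vector<5xf32>
    scf.yield %cur : vector<5xf32>
  }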

mlir/lib/Dialect/Vector/Transforms/VectorDistribute.cpp

Lines changed: 6 additions & 47 deletions

@@ -1459,27 +1459,6 @@ struct WarpOpExtractScalar : public WarpDistributionPattern {
   WarpShuffleFromIdxFn warpShuffleFromIdxFn;
 };
 
-/// Pattern to convert vector.extractelement to vector.extract.
-struct WarpOpExtractElement : public WarpDistributionPattern {
-  using Base::Base;
-  LogicalResult matchAndRewrite(WarpExecuteOnLane0Op warpOp,
-                                PatternRewriter &rewriter) const override {
-    OpOperand *operand =
-        getWarpResult(warpOp, llvm::IsaPred<vector::ExtractElementOp>);
-    if (!operand)
-      return failure();
-    auto extractOp = operand->get().getDefiningOp<vector::ExtractElementOp>();
-    SmallVector<OpFoldResult> indices;
-    if (auto pos = extractOp.getPosition()) {
-      indices.push_back(pos);
-    }
-    rewriter.setInsertionPoint(extractOp);
-    rewriter.replaceOpWithNewOp<vector::ExtractOp>(
-        extractOp, extractOp.getVector(), indices);
-    return success();
-  }
-};
-
 /// Pattern to move out vector.insert with a scalar input.
 /// Only supports 1-D and 0-D destinations for now.
 struct WarpOpInsertScalar : public WarpDistributionPattern {

@@ -1687,26 +1666,6 @@ struct WarpOpInsert : public WarpDistributionPattern {
   }
 };
 
-struct WarpOpInsertElement : public WarpDistributionPattern {
-  using Base::Base;
-  LogicalResult matchAndRewrite(WarpExecuteOnLane0Op warpOp,
-                                PatternRewriter &rewriter) const override {
-    OpOperand *operand =
-        getWarpResult(warpOp, llvm::IsaPred<vector::InsertElementOp>);
-    if (!operand)
-      return failure();
-    auto insertOp = operand->get().getDefiningOp<vector::InsertElementOp>();
-    SmallVector<OpFoldResult> indices;
-    if (auto pos = insertOp.getPosition()) {
-      indices.push_back(pos);
-    }
-    rewriter.setInsertionPoint(insertOp);
-    rewriter.replaceOpWithNewOp<vector::InsertOp>(
-        insertOp, insertOp.getSource(), insertOp.getDest(), indices);
-    return success();
-  }
-};
-
 /// Sink scf.for region out of WarpExecuteOnLane0Op. This can be done only if
 /// the scf.ForOp is the last operation in the region so that it doesn't
 /// change the order of execution. This creates a new scf.for region after the

@@ -1965,12 +1924,12 @@ void mlir::vector::populatePropagateWarpVectorDistributionPatterns(
     const WarpShuffleFromIdxFn &warpShuffleFromIdxFn, PatternBenefit benefit,
     PatternBenefit readBenefit) {
   patterns.add<WarpOpTransferRead>(patterns.getContext(), readBenefit);
-  patterns.add<WarpOpElementwise, WarpOpDeadResult, WarpOpBroadcast,
-               WarpOpShapeCast, WarpOpExtract, WarpOpForwardOperand,
-               WarpOpConstant, WarpOpExtractElement, WarpOpInsertElement,
-               WarpOpInsertScalar, WarpOpInsert, WarpOpCreateMask,
-               WarpOpExtractStridedSlice, WarpOpInsertStridedSlice>(
-      patterns.getContext(), benefit);
+  patterns
+      .add<WarpOpElementwise, WarpOpDeadResult, WarpOpBroadcast,
+           WarpOpShapeCast, WarpOpExtract, WarpOpForwardOperand, WarpOpConstant,
+           WarpOpInsertScalar, WarpOpInsert, WarpOpCreateMask,
+           WarpOpExtractStridedSlice, WarpOpInsertStridedSlice>(
+          patterns.getContext(), benefit);
   patterns.add<WarpOpExtractScalar>(patterns.getContext(), warpShuffleFromIdxFn,
                                     benefit);
   patterns.add<WarpOpScfForOp>(patterns.getContext(), distributionMapFn,
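
The removed WarpOpExtractElement and WarpOpInsertElement patterns only normalized the deprecated ops into vector.extract/vector.insert form; with those ops gone from input IR, WarpOpExtractScalar and WarpOpInsertScalar match the canonical ops directly. A heavily hedged sketch of the kind of IR WarpOpExtractScalar distributes (op spelling assumed from the GPU dialect; names and the unregistered "test.def" op are hypothetical):

  %r = gpu.warp_execute_on_lane_0(%laneid)[32] -> (f32) {
    %v = "test.def"() : () -> vector<32xf32>
    %e = vector.extract %v[%idx] : f32 from vector<32xf32>
    gpu.yield %e : f32
  }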

mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp

Lines changed: 25 additions & 94 deletions

@@ -775,23 +775,26 @@ class FlattenContiguousRowMajorTransferWritePattern
   unsigned targetVectorBitwidth;
 };
 
-/// Base class for `vector.extract/vector.extract_element(vector.transfer_read)`
-/// to `memref.load` patterns. The `match` method is shared for both
-/// `vector.extract` and `vector.extract_element`.
-template <class VectorExtractOp>
-class RewriteScalarExtractOfTransferReadBase
-    : public OpRewritePattern<VectorExtractOp> {
-  using Base = OpRewritePattern<VectorExtractOp>;
-
+/// Rewrite `vector.extract(vector.transfer_read)` to `memref.load`.
+///
+/// All the users of the transfer op must be `vector.extract` ops. If
+/// `allowMultipleUses` is set to true, rewrite transfer ops with any number of
+/// users. Otherwise, rewrite only if the extract op is the single user of the
+/// transfer op. Rewriting a single vector load with multiple scalar loads may
+/// negatively affect performance.
+class RewriteScalarExtractOfTransferRead
+    : public OpRewritePattern<vector::ExtractOp> {
 public:
-  RewriteScalarExtractOfTransferReadBase(MLIRContext *context,
-                                         PatternBenefit benefit,
-                                         bool allowMultipleUses)
-      : Base(context, benefit), allowMultipleUses(allowMultipleUses) {}
-
-  LogicalResult match(VectorExtractOp extractOp) const {
-    auto xferOp =
-        extractOp.getVector().template getDefiningOp<vector::TransferReadOp>();
+  RewriteScalarExtractOfTransferRead(MLIRContext *context,
+                                     PatternBenefit benefit,
+                                     bool allowMultipleUses)
+      : OpRewritePattern(context, benefit),
+        allowMultipleUses(allowMultipleUses) {}
+
+  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
+                                PatternRewriter &rewriter) const override {
+    // Match phase.
+    auto xferOp = extractOp.getVector().getDefiningOp<vector::TransferReadOp>();
     if (!xferOp)
       return failure();
     // Check that we are extracting a scalar and not a sub-vector.

@@ -803,8 +806,7 @@ class RewriteScalarExtractOfTransferReadBase
     // If multiple uses are allowed, check if all the xfer uses are extract ops.
     if (allowMultipleUses &&
         !llvm::all_of(xferOp->getUses(), [](OpOperand &use) {
-          return isa<vector::ExtractOp, vector::ExtractElementOp>(
-              use.getOwner());
+          return isa<vector::ExtractOp>(use.getOwner());
         }))
       return failure();
     // Mask not supported.

@@ -816,81 +818,8 @@ class RewriteScalarExtractOfTransferReadBase
     // Cannot rewrite if the indices may be out of bounds.
     if (xferOp.hasOutOfBoundsDim())
      return failure();
-    return success();
-  }
-
-private:
-  bool allowMultipleUses;
-};
-
-/// Rewrite `vector.extractelement(vector.transfer_read)` to `memref.load`.
-///
-/// All the users of the transfer op must be either `vector.extractelement` or
-/// `vector.extract` ops. If `allowMultipleUses` is set to true, rewrite
-/// transfer ops with any number of users. Otherwise, rewrite only if the
-/// extract op is the single user of the transfer op. Rewriting a single
-/// vector load with multiple scalar loads may negatively affect performance.
-class RewriteScalarExtractElementOfTransferRead
-    : public RewriteScalarExtractOfTransferReadBase<vector::ExtractElementOp> {
-  using RewriteScalarExtractOfTransferReadBase::
-      RewriteScalarExtractOfTransferReadBase;
-
-  LogicalResult matchAndRewrite(vector::ExtractElementOp extractOp,
-                                PatternRewriter &rewriter) const override {
-    if (failed(match(extractOp)))
-      return failure();
-
-    // Construct scalar load.
-    auto loc = extractOp.getLoc();
-    auto xferOp = extractOp.getVector().getDefiningOp<vector::TransferReadOp>();
-    SmallVector<Value> newIndices(xferOp.getIndices().begin(),
-                                  xferOp.getIndices().end());
-    if (extractOp.getPosition()) {
-      AffineExpr sym0, sym1;
-      bindSymbols(extractOp.getContext(), sym0, sym1);
-      OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
-          rewriter, loc, sym0 + sym1,
-          {newIndices[newIndices.size() - 1], extractOp.getPosition()});
-      if (auto value = dyn_cast<Value>(ofr)) {
-        newIndices[newIndices.size() - 1] = value;
-      } else {
-        newIndices[newIndices.size() - 1] =
-            rewriter.create<arith::ConstantIndexOp>(loc,
-                                                    *getConstantIntValue(ofr));
-      }
-    }
-    if (isa<MemRefType>(xferOp.getBase().getType())) {
-      rewriter.replaceOpWithNewOp<memref::LoadOp>(extractOp, xferOp.getBase(),
-                                                  newIndices);
-    } else {
-      rewriter.replaceOpWithNewOp<tensor::ExtractOp>(
-          extractOp, xferOp.getBase(), newIndices);
-    }
-
-    return success();
-  }
-};
-
-/// Rewrite `vector.extractelement(vector.transfer_read)` to `memref.load`.
-/// Rewrite `vector.extract(vector.transfer_read)` to `memref.load`.
-///
-/// All the users of the transfer op must be either `vector.extractelement` or
-/// `vector.extract` ops. If `allowMultipleUses` is set to true, rewrite
-/// transfer ops with any number of users. Otherwise, rewrite only if the
-/// extract op is the single user of the transfer op. Rewriting a single
-/// vector load with multiple scalar loads may negatively affect performance.
-class RewriteScalarExtractOfTransferRead
-    : public RewriteScalarExtractOfTransferReadBase<vector::ExtractOp> {
-  using RewriteScalarExtractOfTransferReadBase::
-      RewriteScalarExtractOfTransferReadBase;
-
-  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
-                                PatternRewriter &rewriter) const override {
-    if (failed(match(extractOp)))
-      return failure();
 
-    // Construct scalar load.
-    auto xferOp = extractOp.getVector().getDefiningOp<vector::TransferReadOp>();
+    // Rewrite phase: construct scalar load.
     SmallVector<Value> newIndices(xferOp.getIndices().begin(),
                                   xferOp.getIndices().end());
     for (auto [i, pos] : llvm::enumerate(extractOp.getMixedPosition())) {

@@ -931,6 +860,9 @@ class RewriteScalarExtractOfTransferRead
 
     return success();
   }
+
+private:
+  bool allowMultipleUses;
 };
 
 /// Rewrite transfer_writes of vectors of size 1 (e.g., vector<1x1xf32>)

@@ -987,8 +919,7 @@ void mlir::vector::transferOpflowOpt(RewriterBase &rewriter,
 void mlir::vector::populateScalarVectorTransferLoweringPatterns(
     RewritePatternSet &patterns, PatternBenefit benefit,
     bool allowMultipleUses) {
-  patterns.add<RewriteScalarExtractElementOfTransferRead,
-               RewriteScalarExtractOfTransferRead>(patterns.getContext(),
+  patterns.add<RewriteScalarExtractOfTransferRead>(patterns.getContext(),
                                                    benefit, allowMultipleUses);
   patterns.add<RewriteScalarWrite>(patterns.getContext(), benefit);
 }
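
After the merge, the single surviving pattern handles both static and dynamic positions via getMixedPosition(), folding each extract position into the corresponding transfer index. A hedged before/after sketch (names hypothetical):

  // Before:
  %read = vector.transfer_read %m[%i, %j], %pad {in_bounds = [true]}
      : memref<?x?xf32>, vector<8xf32>
  %elt = vector.extract %read[%k] : f32 from vector<8xf32>
  // After (roughly): the extract position is added onto the trailing index.
  %idx = affine.apply affine_map<()[s0, s1] -> (s0 + s1)>()[%j, %k]
  %elt2 = memref.load %m[%i, %idx] : memref<?x?xf32>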

mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir

Lines changed: 7 additions & 7 deletions

@@ -37,7 +37,7 @@ func.func @materialize_read_1d() {
 // Both accesses in the load must be clipped otherwise %i1 + 2 and %i1 + 3 will go out of bounds.
 // CHECK: scf.if
 // CHECK-NEXT: memref.load
-// CHECK-NEXT: vector.insertelement
+// CHECK-NEXT: vector.insert
 // CHECK-NEXT: scf.yield
 // CHECK-NEXT: else
 // CHECK-NEXT: scf.yield

@@ -103,7 +103,7 @@ func.func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
 // CHECK: %[[L0:.*]] = affine.apply #[[$ADD]](%[[I0]], %[[I6]])
 // CHECK: scf.if {{.*}} -> (vector<3xf32>) {
 // CHECK-NEXT: %[[SCAL:.*]] = memref.load %{{.*}}[%[[L0]], %[[I1]], %[[I2]], %[[L3]]] : memref<?x?x?x?xf32>
-// CHECK-NEXT: %[[RVEC:.*]] = vector.insertelement %[[SCAL]], %{{.*}}[%[[I6]] : index] : vector<3xf32>
+// CHECK-NEXT: %[[RVEC:.*]] = vector.insert %[[SCAL]], %{{.*}} [%[[I6]]] : f32 into vector<3xf32>
 // CHECK-NEXT: scf.yield
 // CHECK-NEXT: } else {
 // CHECK-NEXT: scf.yield

@@ -540,9 +540,9 @@ func.func @transfer_write_scalable(%arg0: memref<?xf32, strided<[?], offset: ?>>
 // CHECK: %[[VSCALE:.*]] = vector.vscale
 // CHECK: %[[UB:.*]] = arith.muli %[[VSCALE]], %[[C_16]] : index
 // CHECK: scf.for %[[IDX:.*]] = %[[C_0]] to %[[UB]] step %[[STEP]] {
-// CHECK: %[[MASK_VAL:.*]] = vector.extractelement %[[MASK_VEC]][%[[IDX]] : index] : vector<[16]xi1>
+// CHECK: %[[MASK_VAL:.*]] = vector.extract %[[MASK_VEC]][%[[IDX]]] : i1 from vector<[16]xi1>
 // CHECK: scf.if %[[MASK_VAL]] {
-// CHECK: %[[VAL_TO_STORE:.*]] = vector.extractelement %{{.*}}[%[[IDX]] : index] : vector<[16]xf32>
+// CHECK: %[[VAL_TO_STORE:.*]] = vector.extract %{{.*}}[%[[IDX]]] : f32 from vector<[16]xf32>
 // CHECK: memref.store %[[VAL_TO_STORE]], %[[ARG_0]][%[[IDX]]] : memref<?xf32, strided<[?], offset: ?>>
 // CHECK: } else {
 // CHECK: }

@@ -561,7 +561,7 @@ func.func @vector_print_vector_0d(%arg0: vector<f32>) {
 // CHECK: %[[FLAT_VEC:.*]] = vector.shape_cast %[[VEC]] : vector<f32> to vector<1xf32>
 // CHECK: vector.print punctuation <open>
 // CHECK: scf.for %[[IDX:.*]] = %[[C0]] to %[[C1]] step %[[C1]] {
-// CHECK: %[[EL:.*]] = vector.extractelement %[[FLAT_VEC]]{{\[}}%[[IDX]] : index] : vector<1xf32>
+// CHECK: %[[EL:.*]] = vector.extract %[[FLAT_VEC]][%[[IDX]]] : f32 from vector<1xf32>
 // CHECK: vector.print %[[EL]] : f32 punctuation <no_punctuation>
 // CHECK: %[[IS_NOT_LAST:.*]] = arith.cmpi ult, %[[IDX]], %[[C0]] : index
 // CHECK: scf.if %[[IS_NOT_LAST]] {

@@ -591,7 +591,7 @@ func.func @vector_print_vector(%arg0: vector<2x2xf32>) {
 // CHECK: scf.for %[[J:.*]] = %[[C0]] to %[[C2]] step %[[C1]] {
 // CHECK: %[[OUTER_INDEX:.*]] = arith.muli %[[I]], %[[C2]] : index
 // CHECK: %[[FLAT_INDEX:.*]] = arith.addi %[[J]], %[[OUTER_INDEX]] : index
-// CHECK: %[[EL:.*]] = vector.extractelement %[[FLAT_VEC]]{{\[}}%[[FLAT_INDEX]] : index] : vector<4xf32>
+// CHECK: %[[EL:.*]] = vector.extract %[[FLAT_VEC]][%[[FLAT_INDEX]]] : f32 from vector<4xf32>
 // CHECK: vector.print %[[EL]] : f32 punctuation <no_punctuation>
 // CHECK: %[[IS_NOT_LAST_J:.*]] = arith.cmpi ult, %[[J]], %[[C1]] : index
 // CHECK: scf.if %[[IS_NOT_LAST_J]] {

@@ -625,7 +625,7 @@ func.func @vector_print_scalable_vector(%arg0: vector<[4]xi32>) {
 // CHECK: %[[LAST_INDEX:.*]] = arith.subi %[[UPPER_BOUND]], %[[C1]] : index
 // CHECK: vector.print punctuation <open>
 // CHECK: scf.for %[[IDX:.*]] = %[[C0]] to %[[UPPER_BOUND]] step %[[C1]] {
-// CHECK: %[[EL:.*]] = vector.extractelement %[[VEC]]{{\[}}%[[IDX]] : index] : vector<[4]xi32>
+// CHECK: %[[EL:.*]] = vector.extract %[[VEC]][%[[IDX]]] : i32 from vector<[4]xi32>
 // CHECK: vector.print %[[EL]] : i32 punctuation <no_punctuation>
 // CHECK: %[[IS_NOT_LAST:.*]] = arith.cmpi ult, %[[IDX]], %[[LAST_INDEX]] : index
 // CHECK: scf.if %[[IS_NOT_LAST]] {

mlir/test/Dialect/Vector/scalar-vector-transfer-to-memref.mlir

Lines changed: 3 additions & 3 deletions

@@ -8,7 +8,7 @@
 func.func @transfer_read_0d(%m: memref<?x?x?xf32>, %idx: index) -> f32 {
   %cst = arith.constant 0.0 : f32
   %0 = vector.transfer_read %m[%idx, %idx, %idx], %cst : memref<?x?x?xf32>, vector<f32>
-  %1 = vector.extractelement %0[] : vector<f32>
+  %1 = vector.extract %0[] : f32 from vector<f32>
   return %1 : f32
 }
 
@@ -24,7 +24,7 @@ func.func @transfer_read_1d(%m: memref<?x?x?xf32>, %idx: index, %idx2: index) ->
   %cst = arith.constant 0.0 : f32
   %c0 = arith.constant 0 : index
   %0 = vector.transfer_read %m[%idx, %idx, %idx], %cst {in_bounds = [true]} : memref<?x?x?xf32>, vector<5xf32>
-  %1 = vector.extractelement %0[%idx2 : index] : vector<5xf32>
+  %1 = vector.extract %0[%idx2] : f32 from vector<5xf32>
   return %1 : f32
 }
 
@@ -37,7 +37,7 @@ func.func @transfer_read_1d(%m: memref<?x?x?xf32>, %idx: index, %idx2: index) ->
 func.func @tensor_transfer_read_0d(%t: tensor<?x?x?xf32>, %idx: index) -> f32 {
   %cst = arith.constant 0.0 : f32
   %0 = vector.transfer_read %t[%idx, %idx, %idx], %cst : tensor<?x?x?xf32>, vector<f32>
-  %1 = vector.extractelement %0[] : vector<f32>
+  %1 = vector.extract %0[] : f32 from vector<f32>
   return %1 : f32
 }