-
Notifications
You must be signed in to change notification settings - Fork 14.5k
[MLIR][Affine] Add default null init for mlir::affine::MemRefAccess #147922
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[MLIR][Affine] Add default null init for mlir::affine::MemRefAccess #147922
Conversation
Add default null init for mlir::affine::MemRefAccess. This is consistent with various other MLIR structures and had been missing for mlir::affine::MemRefAccess.
@llvm/pr-subscribers-mlir @llvm/pr-subscribers-mlir-affine Author: Uday Bondhugula (bondhugula) Changes: Add default null init for mlir::affine::MemRefAccess. Full diff: https://github.com/llvm/llvm-project/pull/147922.diff 2 Files Affected:
diff --git a/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h b/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
index 4134aef8174bc..3e4b8648061ff 100644
--- a/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
+++ b/mlir/include/mlir/Dialect/Affine/Analysis/AffineAnalysis.h
@@ -81,13 +81,13 @@ LogicalResult getIndexSet(MutableArrayRef<Operation *> ops,
/// Encapsulates a memref load or store access information.
struct MemRefAccess {
Value memref;
- Operation *opInst;
+ Operation *opInst = nullptr;
SmallVector<Value, 4> indices;
- /// Constructs a MemRefAccess from a load or store operation.
- // TODO: add accessors to standard op's load, store, DMA op's to return
- // MemRefAccess, i.e., loadOp->getAccess(), dmaOp->getRead/WriteAccess.
- explicit MemRefAccess(Operation *opInst);
+ /// Constructs a MemRefAccess from an affine read/write operation.
+ explicit MemRefAccess(Operation *memOp);
+
+ MemRefAccess() = default;
// Returns the rank of the memref associated with this access.
unsigned getRank() const;
@@ -126,10 +126,12 @@ struct MemRefAccess {
/// time (considering the memrefs, their respective affine access maps and
/// operands). The equality of access functions + operands is checked by
/// subtracting fully composed value maps, and then simplifying the difference
- /// using the expression flattener.
- /// TODO: this does not account for aliasing of memrefs.
+ /// using the expression flattener. This does not account for aliasing of
+ /// memrefs.
bool operator==(const MemRefAccess &rhs) const;
bool operator!=(const MemRefAccess &rhs) const { return !(*this == rhs); }
+
+ explicit operator bool() const { return !!memref; }
};
// DependenceComponent contains state about the direction of a dependence as an
diff --git a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
index 8bdb4c3593335..4739290bf6e4b 100644
--- a/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/Utils.cpp
@@ -1550,15 +1550,17 @@ mlir::affine::computeSliceUnion(ArrayRef<Operation *> opsA,
FlatAffineValueConstraints sliceUnionCst;
assert(sliceUnionCst.getNumDimAndSymbolVars() == 0);
std::vector<std::pair<Operation *, Operation *>> dependentOpPairs;
- for (Operation *i : opsA) {
- MemRefAccess srcAccess(i);
- for (Operation *j : opsB) {
- MemRefAccess dstAccess(j);
+ MemRefAccess srcAccess;
+ MemRefAccess dstAccess;
+ for (Operation *a : opsA) {
+ srcAccess = MemRefAccess(a);
+ for (Operation *b : opsB) {
+ dstAccess = MemRefAccess(b);
if (srcAccess.memref != dstAccess.memref)
continue;
// Check if 'loopDepth' exceeds nesting depth of src/dst ops.
- if ((!isBackwardSlice && loopDepth > getNestingDepth(i)) ||
- (isBackwardSlice && loopDepth > getNestingDepth(j))) {
+ if ((!isBackwardSlice && loopDepth > getNestingDepth(a)) ||
+ (isBackwardSlice && loopDepth > getNestingDepth(b))) {
LLVM_DEBUG(llvm::dbgs() << "Invalid loop depth\n");
return SliceComputationResult::GenericFailure;
}
@@ -1577,13 +1579,12 @@ mlir::affine::computeSliceUnion(ArrayRef<Operation *> opsA,
}
if (result.value == DependenceResult::NoDependence)
continue;
- dependentOpPairs.emplace_back(i, j);
+ dependentOpPairs.emplace_back(a, b);
// Compute slice bounds for 'srcAccess' and 'dstAccess'.
ComputationSliceState tmpSliceState;
- mlir::affine::getComputationSliceState(i, j, dependenceConstraints,
- loopDepth, isBackwardSlice,
- &tmpSliceState);
+ getComputationSliceState(a, b, dependenceConstraints, loopDepth,
+ isBackwardSlice, &tmpSliceState);
if (sliceUnionCst.getNumDimAndSymbolVars() == 0) {
// Initialize 'sliceUnionCst' with the bounds computed in previous step.
@@ -1948,16 +1949,16 @@ AffineForOp mlir::affine::insertBackwardComputationSlice(
// Constructs MemRefAccess populating it with the memref, its indices and
// opinst from 'loadOrStoreOpInst'.
-MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
- if (auto loadOp = dyn_cast<AffineReadOpInterface>(loadOrStoreOpInst)) {
+MemRefAccess::MemRefAccess(Operation *memOp) {
+ if (auto loadOp = dyn_cast<AffineReadOpInterface>(memOp)) {
memref = loadOp.getMemRef();
- opInst = loadOrStoreOpInst;
+ opInst = memOp;
llvm::append_range(indices, loadOp.getMapOperands());
} else {
- assert(isa<AffineWriteOpInterface>(loadOrStoreOpInst) &&
+ assert(isa<AffineWriteOpInterface>(memOp) &&
"Affine read/write op expected");
- auto storeOp = cast<AffineWriteOpInterface>(loadOrStoreOpInst);
- opInst = loadOrStoreOpInst;
+ auto storeOp = cast<AffineWriteOpInterface>(memOp);
+ opInst = memOp;
memref = storeOp.getMemRef();
llvm::append_range(indices, storeOp.getMapOperands());
}
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
Add default null init for mlir::affine::MemRefAccess. This is consistent with various other MLIR structures and had been missing for mlir::affine::MemRefAccess.