@@ -314,9 +314,12 @@ const detail::ScheduleTree* findThreadMappingAncestor(
 }
 
 /*
- * Check if a reference group is accessed in a coalesced way.
+ * Should this reference group be promoted for the purpose of coalescing?
  *
- * In particular, check if incrementing the schedule dimension mapped to
+ * If the reference group is not already accessed in a coalesced way,
+ * then the group should be promoted.
+ * The check for coalesced accesses is performed as follows.
+ * Check if incrementing the schedule dimension mapped to
  * Thread::x results in the last tensor index being incremented as well.
  * Since accesses in the group may belong to different statements, which may
  * have different loops mapped to Thread::x, perform the check for each basic
@@ -325,7 +328,7 @@ const detail::ScheduleTree* findThreadMappingAncestor(
  * accessed in a coalesced way if all references in this group are accessed in
  * a coalesced way.
  */
-bool isCoalesced(
+bool promotionImprovesCoalescing(
     const ThreadIdxXScheduleDepthState& threadIdxXScheduleDepthState,
     const TensorReferenceGroup& group,
     isl::union_map schedule,
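For readers less familiar with the criterion described in the updated comment: it matches the usual CUDA notion of coalescing, where adjacent threads along threadIdx.x should touch adjacent addresses in the innermost (last, contiguous) tensor dimension. A minimal sketch contrasting the two cases (hypothetical kernels, not part of this change):

```cuda
#include <cstdio>

// Coalesced: incrementing threadIdx.x increments the last (contiguous) index,
// so each warp reads one contiguous segment of `in`.
__global__ void copyRowMajor(const float* in, float* out, int rows, int cols) {
  int r = blockIdx.y;
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (r < rows && c < cols) {
    out[r * cols + c] = in[r * cols + c]; // last index follows threadIdx.x
  }
}

// Not coalesced: incrementing threadIdx.x strides by `cols` elements,
// so a warp touches widely separated addresses.
__global__ void copyColumnStrided(const float* in, float* out, int rows, int cols) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  int c = blockIdx.y;
  if (r < rows && c < cols) {
    out[r * cols + c] = in[r * cols + c]; // first index follows threadIdx.x
  }
}
```

Groups whose accesses look like the second kernel are the ones the renamed predicate now reports as promotion candidates: the idea is that copying them through shared memory lets the copy itself be coalesced even when the compute accesses are not.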
@@ -353,11 +356,11 @@ bool isCoalesced(
           .apply_range(scheduledAccess);
 
       if (not accessedByAdjacentX.is_subset(elementToNext)) {
-        return false;
+        return true;
       }
     }
   }
-  return true;
+  return false;
 }
 
 /*
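The inverted returns keep the underlying check unchanged: accessedByAdjacentX being a subset of elementToNext still means that moving to the next Thread::x value moves to the next element of the last tensor dimension; the function now answers the opposite question. A scalar analogue of that subset test, with made-up names standing in for the isl relations, might look like:

```cuda
#include <array>
#include <cstdio>
#include <functional>

// Hypothetical stand-in for one scheduled access: the schedule dimension
// mapped to Thread::x -> the accessed 2-D tensor index.
using AccessFn = std::function<std::array<long, 2>(long)>;

// Scalar analogue of "accessedByAdjacentX is a subset of elementToNext":
// stepping from x to x + 1 must advance only the last index, by exactly one.
bool accessIsCoalesced(const AccessFn& access, long xBegin, long xEnd) {
  for (long x = xBegin; x + 1 < xEnd; ++x) {
    auto cur = access(x);
    auto next = access(x + 1);
    if (next[0] != cur[0] || next[1] != cur[1] + 1) {
      return false;
    }
  }
  return true;
}

int main() {
  // A[3][x]: the last index follows x -> coalesced.
  AccessFn lastIndexFollowsX = [](long x) { return std::array<long, 2>{3, x}; };
  // A[x][5]: the first index follows x -> not coalesced.
  AccessFn firstIndexFollowsX = [](long x) { return std::array<long, 2>{x, 5}; };
  std::printf("A[3][x] coalesced: %d\n", accessIsCoalesced(lastIndexFollowsX, 0, 64));
  std::printf("A[x][5] coalesced: %d\n", accessIsCoalesced(firstIndexFollowsX, 0, 64));
}
```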
@@ -558,7 +561,7 @@ void promoteToSharedGreedy(
       // Do not promote if the group features no reuse and is accessed in a
       // coalesced way.
       if (!hasReuseWithin(*group, partialSchedMupa) &&
-          isCoalesced(
+          !promotionImprovesCoalescing(
               threadIdxXScheduleDepthState,
               *group,
               fullSched,
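With the rename, the guard above reads in its positive form: promote the group when it either has reuse or would become coalesced through promotion. A tiny stand-in sketch of that reading (ignoring the other constraints the real function checks afterwards):

```cuda
#include <cstdio>

// Stand-in for the decision implied by the guard above: skip promotion only
// when the group has no reuse AND promotion would not improve coalescing.
bool worthPromoting(bool hasReuse, bool promotionImprovesCoalescing) {
  return hasReuse || promotionImprovesCoalescing;
}

int main() {
  std::printf("no reuse, already coalesced -> %d\n", worthPromoting(false, false));
  std::printf("no reuse, would coalesce    -> %d\n", worthPromoting(false, true));
  std::printf("reuse,    already coalesced -> %d\n", worthPromoting(true, false));
  std::printf("reuse,    would coalesce    -> %d\n", worthPromoting(true, true));
}
```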