Skip to content
This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit fa96c20

Browse files
author
Sven Verdoolaege
committed
promoteToRegistersBelow: use templated isl types
1 parent 5fb957b commit fa96c20

File tree

1 file changed

+4
-5
lines changed

1 file changed: +4 additions, -5 deletions

tc/core/polyhedral/cuda/memory_promotion_heuristic.cc

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -654,16 +654,15 @@ void promoteToRegistersBelow(MappedScop& mscop, detail::ScheduleTree* scope) {
654654
auto blockSchedule = mscop.blockMappingSchedule(mscop.schedule());
655655

656656
// Pure affine schedule without (mapping) filters.
657-
isl::multi_union_pw_aff partialSchedMupa =
658-
partialScheduleMupa<Scope>(root, scope);
657+
auto partialSchedMupa = partialScheduleMupa<Scope>(root, scope);
659658
// Schedule with block mapping filter.
660659
auto partialSched =
661660
isl::union_map::from(partialSchedMupa).intersect_domain(blockMapping);
662661
// The following promotion validity and profitability checks need to be
663662
// performed with respect to the block mapping, so append the block schedule.
664663
// If the partial schedule contains it already, it will just end up with
665664
// identical dimensions without affecting the result of the checks.
666-
partialSchedMupa = partialSchedMupa.flat_range_product(blockSchedule);
665+
auto partialSchedBlockMupa = partialSchedMupa.range_product(blockSchedule);
667666

668667
for (auto& tensorGroups : groupMap) {
669668
auto tensorId = tensorGroups.first;
@@ -677,11 +676,11 @@ void promoteToRegistersBelow(MappedScop& mscop, detail::ScheduleTree* scope) {
677676
continue;
678677
}
679678
if (!isPromotableToRegistersBelow(
680-
*group, root, scope, partialSchedMupa, threadSchedule)) {
679+
*group, root, scope, partialSchedBlockMupa, threadSchedule)) {
681680
continue;
682681
}
683682
// Check reuse within threads.
684-
auto schedule = partialSchedMupa.flat_range_product(threadSchedule);
683+
auto schedule = partialSchedBlockMupa.range_product(threadSchedule);
685684
if (!hasReuseWithin(*group, schedule)) {
686685
continue;
687686
}

0 commit comments

Comments (0)