@@ -654,16 +654,15 @@ void promoteToRegistersBelow(MappedScop& mscop, detail::ScheduleTree* scope) {
   auto blockSchedule = mscop.blockMappingSchedule(mscop.schedule());
 
   // Pure affine schedule without (mapping) filters.
-  isl::multi_union_pw_aff partialSchedMupa =
-      partialScheduleMupa<Scope>(root, scope);
+  auto partialSchedMupa = partialScheduleMupa<Scope>(root, scope);
   // Schedule with block mapping filter.
   auto partialSched =
       isl::union_map::from(partialSchedMupa).intersect_domain(blockMapping);
   // The following promotion validity and profitability checks need to be
   // performed with respect to the block mapping, so append the block schedule.
   // If the partial schedule contains it already, it will just end up with
   // identical dimensions without affecting the result of the checks.
-  partialSchedMupa = partialSchedMupa.flat_range_product(blockSchedule);
+  auto partialSchedBlockMupa = partialSchedMupa.range_product(blockSchedule);
 
   for (auto& tensorGroups : groupMap) {
     auto tensorId = tensorGroups.first;
@@ -677,11 +676,11 @@ void promoteToRegistersBelow(MappedScop& mscop, detail::ScheduleTree* scope) {
       continue;
     }
     if (!isPromotableToRegistersBelow(
-            *group, root, scope, partialSchedMupa, threadSchedule)) {
+            *group, root, scope, partialSchedBlockMupa, threadSchedule)) {
       continue;
     }
     // Check reuse within threads.
-    auto schedule = partialSchedMupa.flat_range_product(threadSchedule);
+    auto schedule = partialSchedBlockMupa.range_product(threadSchedule);
     if (!hasReuseWithin(*group, schedule)) {
      continue;
    }
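
Not part of the patch, but for context: the change swaps flat_range_product for range_product when appending the block (and thread) schedule, and gives the result a new name, partialSchedBlockMupa, instead of overwriting partialSchedMupa. Below is a minimal standalone sketch of the difference between the two products, assuming isl's generated C++ bindings (isl/cpp.h) with string constructors and stream output; Tensor Comprehensions ships its own isl wrappers, so exact spellings may differ.

#include <isl/cpp.h>

#include <iostream>

int main() {
  isl::ctx ctx(isl_ctx_alloc());
  {
    // Two one-dimensional schedule functions over the same statement domain.
    isl::multi_union_pw_aff a(ctx, "[{ S[i, j] -> [(i)] }]");
    isl::multi_union_pw_aff b(ctx, "[{ S[i, j] -> [(j)] }]");

    // flat_range_product concatenates the output dimensions into one flat
    // space: S[i, j] -> [i, j].
    std::cout << a.flat_range_product(b) << "\n";

    // range_product keeps the two ranges as a nested pair:
    // S[i, j] -> [[i] -> [j]]. Appended block-schedule dimensions thus stay
    // structurally separate from the partial schedule instead of being
    // flattened into it.
    std::cout << a.range_product(b) << "\n";
  } // isl objects are destroyed before the context is freed
  isl_ctx_free(ctx.get());
  return 0;
}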