Skip to content
This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit ab9b2c3

Browse files
Merge pull request #304 from facebookresearch/pr/typo
fix typos in comments
2 parents e34fce7 + 26277ce commit ab9b2c3

File tree

5 files changed

+8
-8
lines changed

5 files changed

+8
-8
lines changed

tc/core/polyhedral/cuda/mapped_scop.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ void fixThreadsBelowFilter(
176176

177177
for (size_t i = begin; i < end; ++i) {
178178
if (mapping::ThreadId::makeId(i) == mapping::ThreadId::x()) {
179-
// Mapping happend below filterTree, so we need points active for its
179+
// Mapping happened below filterTree, so we need points active for its
180180
// children. After insertion, filterTree is guaranteed to have at least
181181
// one child.
182182
mscop.threadIdxXScheduleDepthState.emplace_back(std::make_pair(

tc/core/polyhedral/cuda/memory_promotion_heuristic.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -318,7 +318,7 @@ bool isCoalesced(
318318
* are mapped to threads (the innermost of them being mapped to thread x) and
319319
* the depth of this mapping can be obtained from threadIdxXScheduleDepthState.
320320
*
321-
* In parciular, the group's footprint must contain only one element and the
321+
* In particular, the group's footprint must contain only one element and the
322322
* same tensor element should never be accessed by two different threads.
323323
*/
324324
bool isPromotableToRegisterBelowThreads(
@@ -350,15 +350,15 @@ bool isPromotableToRegisterBelowThreads(
350350
auto scheduledAccesses = originalAccesses.apply_domain(schedule);
351351

352352
// Scheduled accesses contain maps from schedule dimensions to tensor
353-
// subscripts. Compute the relation that between the schedule dimensions
353+
// subscripts. Compute the relation between the schedule dimensions
354354
// mapped to threads and tensor subscripts by first removing dimensions
355355
// following the one mapped to thread x (last one assuming inverse mapping
356356
// order), then by equating all dimensions not mapped to threads to
357357
// parameters. Promotion to registers is only allowed if the resulting
358358
// relation is injective, i.e. the same tensor element is never accessed by
359359
// more than one thread. Note that our current check is overly conservative
360360
// because different values of schedule dimension may get mapped to the same
361-
// thread, in which case the could access the same tensor element.
361+
// thread, in which case they could access the same tensor element.
362362
for (auto sa : isl::UnionAsVector<isl::union_map>(scheduledAccesses)) {
363363
sa = sa.project_out(
364364
isl::dim_type::in, depth, sa.dim(isl::dim_type::in) - depth);

tc/core/polyhedral/memory_promotion.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -372,7 +372,7 @@ isl::multi_aff TensorReferenceGroup::promotion() const {
372372
isl::map map = scopedAccesses();
373373
auto accessSpace = map.get_space();
374374

375-
// lower bounsd space is S -> O; which we transform into [S -> O] -> P
375+
// lower bounds space is S -> P; which we transform into [S -> O] -> P
376376
auto lowerBounds = approximation.lowerBounds().pullback(
377377
isl::multi_aff::domain_map(accessSpace));
378378
auto promotion = isl::multi_aff::range_map(accessSpace)

tc/core/polyhedral/schedule_transforms.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,8 +64,8 @@ detail::ScheduleTree* joinBandsIterative(
6464
detail::ScheduleTree* tree,
6565
bool permutable = false);
6666

67-
// Split tree rooted under relativeRoot two nested trees, one with the first
68-
// "pos" dimensions and one with the remaining dimensions.
67+
// Split tree rooted under relativeRoot into two nested trees,
68+
// one with the first "pos" dimensions and one with the remaining dimensions.
6969
// The schedules of the two bands live in anonymous spaces.
7070
// This updates the current ScheduleTree and returns it so we can chain
7171
// expressions.

test/test_cuda_mapper_memory_promotion.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -427,7 +427,7 @@ TEST_F(MapperMemoryPromotionRAW, fitAtOuterDepths) {
427427
<< "expected one reference group to be promoted";
428428

429429
// Note that due to bank conflict heuristic, we will allocate 32x33 arrays in
430-
// shraed memory which require 32x33x2x4=8448 bytes.
430+
// shared memory which require 32x33x2x4=8448 bytes.
431431
auto mscop3 = makeWithSharedGreedy(42, 40, 32, 32, 2, 8448);
432432
EXPECT_EQ(mscop3->scop().promotedDecls().size(), 2)
433433
<< "expected two reference groups to fit";

0 commit comments

Comments (0)