Skip to content

Using EmbeddingPlannerBase for lp planner #3232

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion torchrec/distributed/planner/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@
- automatically building and selecting an optimized sharding plan.
"""

from torchrec.distributed.planner.planners import EmbeddingShardingPlanner # noqa
from torchrec.distributed.planner.planners import (  # noqa
EmbeddingPlannerBase,
EmbeddingShardingPlanner,
)
from torchrec.distributed.planner.types import ParameterConstraints, Topology # noqa
from torchrec.distributed.planner.utils import bytes_to_gb, sharder_name # noqa
32 changes: 16 additions & 16 deletions torchrec/distributed/planner/planners.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,22 @@ def collective_plan(
sharders,
)

def hash_planner_context_inputs(self) -> int:
    """
    Generates a hash of the plan-defining planner inputs: topology, batch
    size, enumerator, storage reservation, and constraints. Partitioner,
    proposer, performance model, and stats are deliberately excluded —
    they do not affect whether an existing plan is still applicable.

    These are all the inputs needed to verify whether a previously
    generated sharding plan is still valid in a new context.

    Returns:
        int: a hash capturing topology, batch size, enumerator,
        storage reservation, and constraints.
    """
    # Delegates to the module-level helper of the same name; only the
    # five plan-defining inputs participate in the hash.
    return hash_planner_context_inputs(
        self._topology,
        self._batch_size,
        self._enumerator,
        self._storage_reservation,
        self._constraints,
    )


class EmbeddingShardingPlanner(EmbeddingPlannerBase):
"""
Expand Down Expand Up @@ -368,22 +384,6 @@ def collective_plan(
sharders,
)

def hash_planner_context_inputs(self) -> int:
    """
    Generates a hash of the plan-defining planner inputs: topology, batch
    size, enumerator, storage reservation, and constraints. Partitioner,
    proposer, performance model, and stats are deliberately excluded —
    they do not affect whether an existing plan is still applicable.

    These are all the inputs needed to verify whether a previously
    generated sharding plan is still valid in a new context.

    Returns:
        int: a hash capturing topology, batch size, enumerator,
        storage reservation, and constraints.
    """
    # Delegates to the module-level helper of the same name; only the
    # five plan-defining inputs participate in the hash.
    return hash_planner_context_inputs(
        self._topology,
        self._batch_size,
        self._enumerator,
        self._storage_reservation,
        self._constraints,
    )

def plan(
self,
module: nn.Module,
Expand Down
Loading