We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent baed180 commit c40784c — Copy full SHA for c40784c
vllm/v1/worker/xpu_worker.py
@@ -148,11 +148,11 @@ def init_device(self):
148
os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT
149
os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE
150
os.environ["LOCAL_RANK"] = str(self.local_rank)
151
- dist_backend = "ccl"
152
153
init_worker_distributed_environment(self.vllm_config, self.rank,
154
self.distributed_init_method,
155
- self.local_rank, dist_backend)
+ self.local_rank,
+ current_platform.dist_backend)
156
157
# global all_reduce needed for overall oneccl warm up
158
torch.distributed.all_reduce(torch.zeros(1).xpu())
0 commit comments