1 parent 05d8b6a commit e3a9cd5
vllm_ascend/worker/worker_v1.py
@@ -278,9 +278,10 @@ def pin_lora(self, lora_id: int) -> bool:
     def execute_dummy_batch(self) -> None:
         runner = self.model_runner
         if runner.dp_size <= 1:
-            raise ValueError("Dummy batch execution should only be "
-                             "performed with data parallelism enabled, but got "
-                             f"dp_size={runner.dp_size}.")
+            raise ValueError(
+                "Dummy batch execution should only be "
+                "performed with data parallelism enabled, but got "
+                f"dp_size={runner.dp_size}.")
 
         # If torchair graph is enabled, notify the other DP ranks that this is a
         # dummy run by using '-1' as a flag for num_tokens. This will be
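For context, here is a minimal standalone sketch of the guard this hunk reformats: a dummy batch only makes sense when data parallelism is enabled, and the in-diff comment describes '-1' as the num_tokens flag that marks a dummy run for the other DP ranks. The `_FakeRunner` class and `_dummy_run` call below are hypothetical stand-ins for illustration, not the actual vllm-ascend model runner API.

```python
class _FakeRunner:
    """Hypothetical stand-in for the model runner; only dp_size matters here."""

    def __init__(self, dp_size: int) -> None:
        self.dp_size = dp_size

    def _dummy_run(self, num_tokens: int) -> None:
        # Placeholder for the real dummy forward pass.
        print(f"dummy run with num_tokens={num_tokens}")


def execute_dummy_batch(runner: _FakeRunner) -> None:
    # Same guard as in the diff: reject dummy batches unless DP is enabled.
    if runner.dp_size <= 1:
        raise ValueError(
            "Dummy batch execution should only be "
            "performed with data parallelism enabled, but got "
            f"dp_size={runner.dp_size}.")
    # Per the diff comment, '-1' signals to the other DP ranks that this
    # is a dummy run (assumed flag value for this sketch).
    runner._dummy_run(num_tokens=-1)


if __name__ == "__main__":
    execute_dummy_batch(_FakeRunner(dp_size=2))    # prints the dummy-run line
    # execute_dummy_batch(_FakeRunner(dp_size=1))  # would raise ValueError
```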