1 file changed: +10 −9 lines changed

@@ -1116,17 +1116,18 @@ def _process_reqs(
         # Add graph_pad_size here
         if self.torchair_graph_enabled and not with_prefill:
-            if self.dp_size > 1:
-                padded_batch_size = self.select_torchair_padded_batch_size(
-                    max_num_tokens)
-                num_tokens_across_dp.masked_fill_(num_tokens_across_dp == -1,
-                                                  padded_batch_size)
-            else:
-                padded_batch_size = self.select_torchair_padded_batch_size(
-                    total_num_scheduled_tokens)
+            max_num_tokens = (max_num_tokens
+                              if self.dp_size > 1 else num_input_tokens)
+            padded_batch_size = self.select_torchair_padded_batch_size(
+                max_num_tokens)
+            num_tokens_across_dp.masked_fill_(num_tokens_across_dp == -1,
+                                              padded_batch_size)
             graph_pad_size = padded_batch_size - total_num_scheduled_tokens
-
             extra_builder_kwargs['graph_pad_size'] = graph_pad_size
+        else:
+            # If torchair graph is not enabled, or if with_prefill is True, the
+            # dummy run batch size is set to 1.
+            num_tokens_across_dp.masked_fill_(num_tokens_across_dp == -1, 1)

         if self.vllm_config.model_config.use_mla:
             attn_metadata = self.attn_metadata_builder.build(  # type: ignore
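To make the intent of the change easier to follow, here is a minimal, self-contained sketch of the padding flow after this patch. The candidate graph batch sizes and the body of `select_torchair_padded_batch_size` below are assumptions for illustration only; only the selection of the pad target (`max_num_tokens` with DP, the rank's own token count otherwise) and the `masked_fill_` pattern mirror the diff.

```python
import torch

# Hypothetical stand-in for self.select_torchair_padded_batch_size: picks the
# smallest pre-captured graph batch size that can hold `num_tokens`. The real
# candidate sizes come from the torchair graph config; these values are assumed.
TORCHAIR_GRAPH_BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128]

def select_torchair_padded_batch_size(num_tokens: int) -> int:
    for size in TORCHAIR_GRAPH_BATCH_SIZES:
        if num_tokens <= size:
            return size
    return TORCHAIR_GRAPH_BATCH_SIZES[-1]

# num_tokens_across_dp holds the token count per data-parallel rank; ranks that
# will only run a dummy batch are marked with -1 and must be filled before use.
num_tokens_across_dp = torch.tensor([7, -1, 5, -1])

dp_size = num_tokens_across_dp.numel()
num_input_tokens = 7   # tokens scheduled on this rank (example value)
max_num_tokens = 7     # max token count across all DP ranks (example value)

# Mirrors the new branch: with DP the pad target is the max across ranks,
# otherwise just this rank's own token count.
max_num_tokens = max_num_tokens if dp_size > 1 else num_input_tokens
padded_batch_size = select_torchair_padded_batch_size(max_num_tokens)  # -> 8
num_tokens_across_dp.masked_fill_(num_tokens_across_dp == -1, padded_batch_size)
print(num_tokens_across_dp)  # tensor([7, 8, 5, 8])
```

In the non-graph (or prefill) path, the same `masked_fill_` call is used with a value of 1, so ranks without real work fall back to a dummy batch of size 1 instead of a padded graph batch size.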