Commit 1d67aab

linting fixes
1 parent cbc803a commit 1d67aab

File tree

1 file changed: +9 -6 lines changed

vllm/model_executor/models/llama4_eagle.py

Lines changed: 9 additions & 6 deletions
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0

 from collections.abc import Iterable
+from typing import Optional

 import torch
 import torch.nn as nn
@@ -22,7 +23,6 @@
 from vllm.model_executor.models.utils import extract_layer_index

 from .utils import AutoWeightsLoader, maybe_prefix
-from typing import Optional

 logger = init_logger(__name__)

@@ -39,8 +39,8 @@ def __init__(
         quant_config: Optional[QuantizationConfig] = None,
     ) -> None:
         super().__init__()
-        self.config = vllm_config. \
-            speculative_config.draft_model_config.hf_config
+        self.config = (
+            vllm_config.speculative_config.draft_model_config.hf_config)
         self.validate_and_update_config(start_layer_id, quant_config)
         self.vocab_size = self.config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(
@@ -153,8 +153,8 @@ class EagleLlama4ForCausalLM(Llama4ForCausalLM):

     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
         nn.Module.__init__(self)
-        self.config = vllm_config. \
-            speculative_config.draft_model_config.hf_config
+        self.config = (
+            vllm_config.speculative_config.draft_model_config.hf_config)
         target_layer_num = vllm_config.model_config.get_num_layers(
             vllm_config.parallel_config)
         # draft model quantization config may differ from target model
@@ -177,7 +177,10 @@ def forward(
     ) -> tuple[torch.Tensor, torch.Tensor]:
         return self.model(input_ids, positions, hidden_states)

-    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
+    def load_weights(
+            self,
+            weights: Iterable[tuple[str, torch.Tensor]]
+    ) -> None:
         loader = AutoWeightsLoader(
             self,
             # lm_head is tied with target model (Llama4ForCausalLM)
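For readers skimming the change: the diff applies two recurring lint fixes. The "from typing import Optional" import moves into the top-level import block, and backslash line continuations are replaced with implicit continuation inside parentheses. Below is a minimal, self-contained sketch of the continuation pattern; the SimpleNamespace objects are illustrative stand-ins for the nested vLLM config objects, not the real classes.

from types import SimpleNamespace

# Illustrative stand-in for vllm_config.speculative_config.draft_model_config.hf_config.
vllm_config = SimpleNamespace(
    speculative_config=SimpleNamespace(
        draft_model_config=SimpleNamespace(hf_config={"vocab_size": 32000})))

# Before: explicit backslash continuation, the style the linter flags.
config_before = vllm_config. \
    speculative_config.draft_model_config.hf_config

# After: implicit continuation inside parentheses, as in the commit.
config_after = (
    vllm_config.speculative_config.draft_model_config.hf_config)

assert config_before is config_after  # same attribute lookup, different layout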

0 commit comments
