Skip to content

Commit a6eeae4

Browse files
committed
fix minor
Signed-off-by: Thomas Parnell <tpa@zurich.ibm.com>
1 parent 465c4ef commit a6eeae4

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

vllm/model_executor/layers/mamba/mamba_mixer2.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
LoaderFunction, composed_weight_loader, sharded_weight_loader)
3434
from vllm.model_executor.models.mamba_cache import MambaCacheParams
3535
from vllm.model_executor.utils import set_weight_attrs
36-
from vllm.platform import current_platform
36+
from vllm.platforms import current_platform
3737
from vllm.utils import direct_register_custom_op
3838
from vllm.v1.attention.backends.mamba_attn import Mamba2AttentionMetadata
3939

@@ -746,7 +746,7 @@ def mamba_mixer2(
746746
output: torch.Tensor,
747747
layer_name: str,
748748
mup_vector: Optional[torch.Tensor] = None,
749-
):
749+
) -> None:
750750
forward_context: ForwardContext = get_forward_context()
751751
self = forward_context.no_compile_layers[layer_name]
752752
self.forward_cuda(hidden_states=hidden_states,
@@ -761,7 +761,7 @@ def mamba_mixer2_fake(
761761
output: torch.Tensor,
762762
layer_name: str,
763763
mup_vector: Optional[torch.Tensor] = None,
764-
):
764+
) -> None:
765765
return
766766

767767

0 commit comments

Comments (0)