@@ -4735,6 +4735,14 @@ def set_gguf_parameters(self):
 class MambaModel(TextModel):
     model_arch = gguf.MODEL_ARCH.MAMBA

+    def __init__(self, dir_model: Path, *args, **kwargs):
+        # Avoid using AutoConfig for hparams
+        hparams = kwargs.pop("hparams", None)
+        if hparams is None:
+            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
+                hparams = json.load(f)
+        super().__init__(dir_model, *args, hparams=hparams, **kwargs)
+
     def set_vocab(self):
         vocab_size = self.hparams["vocab_size"]
         # Round vocab size to next multiple of 8
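Both Mamba converters override __init__ to read config.json directly rather than going through transformers.AutoConfig, which (per the comments in the diff) can mis-detect these checkpoints. A minimal standalone sketch of that loading pattern, purely illustrative (the helper name and the example path are not part of the PR):

import json
from pathlib import Path

def load_hparams(dir_model: Path) -> dict:
    # Read the raw JSON config instead of relying on transformers.AutoConfig
    with open(dir_model / "config.json", "r", encoding="utf-8") as f:
        return json.load(f)

# hparams = load_hparams(Path("mamba-130m"))  # hypothetical non-HF checkpoint directory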
@@ -4809,6 +4817,100 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         return [(new_name, data_torch)]


+@ModelBase.register("Mamba2ForCausalLM")
+class Mamba2Model(TextModel):
+    model_arch = gguf.MODEL_ARCH.MAMBA2
+
+    def __init__(self, dir_model: Path, *args, **kwargs):
+        # Avoid using AutoConfig for hparams
+        # It wrongly assumes all Mamba2 models are Mamba-Codestral-7B-v0.1
+        hparams = kwargs.pop("hparams", None)
+        if hparams is None:
+            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
+                hparams = json.load(f)
+        super().__init__(dir_model, *args, hparams=hparams, **kwargs)
+
+    def set_vocab(self):
+        vocab_size = self.hparams["vocab_size"]
+        # Round vocab size to next multiple of 16
+        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 16)
+        # pad using ceiling division
+        # ref: https://stackoverflow.com/a/17511341/22827863
+        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
+        self.hparams["vocab_size"] = vocab_size
+
+        if (self.dir_model / "tokenizer.model").is_file():
+            self._set_vocab_sentencepiece()
+        elif (self.dir_model / "tokenizer.model.v3").is_file():
+            # mamba-codestral
+            raise NotImplementedError(f"Please rename {self.dir_model / 'tokenizer.model.v3'} to {self.dir_model / 'tokenizer.model'}")
+        elif (self.dir_model / "tokenizer.json").is_file():
+            self._set_vocab_gpt2()
+        else:
+            # Use the GPT-NeoX tokenizer when no tokenizer files are present
+            self._set_vocab_builtin("gpt-neox", vocab_size)
+
+    def set_gguf_parameters(self):
+        d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
+        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
+        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
+        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 128
+        head_dim = self.find_hparam(["head_dim"], optional=True) or 64
+        n_group = self.find_hparam(["n_groups"], optional=True) or 1
+
+        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
+
+        # Fail early for models which don't have a block expansion factor of 2
+        # TODO: does this really matter?
+        assert d_inner == 2 * d_model
+        assert d_inner % head_dim == 0
+
+        self.gguf_writer.add_context_length(2 ** 20)  # arbitrary value; for those who use the default
+        self.gguf_writer.add_embedding_length(d_model)
+        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
+        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
+        self.gguf_writer.add_block_count(self.block_count)
+        self.gguf_writer.add_ssm_conv_kernel(d_conv)
+        self.gguf_writer.add_ssm_inner_size(d_inner)
+        self.gguf_writer.add_ssm_state_size(d_state)
+        self.gguf_writer.add_ssm_time_step_rank(d_inner // head_dim)
+        self.gguf_writer.add_ssm_group_count(n_group)
+        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
+        self.gguf_writer.add_file_type(self.ftype)
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+
+        if name.startswith("model.backbone") or name.startswith("model.lm_head"):
+            # map Mamba-Codestral-7B-v0.1 tensor names to the names used by Mamba-2
+            name = name.removeprefix("model.")
+
+        if name.endswith(".dt_bias"):
+            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
+
+        new_name = self.map_tensor_name(name)
+
+        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
+            data_torch = data_torch.squeeze()
+        elif any(self.match_model_tensor_name(new_name, t, bid, suffix="") for t in [
+            gguf.MODEL_TENSOR.SSM_A,
+            gguf.MODEL_TENSOR.SSM_D,
+        ]):
+            # unsqueeze A to use similar shape semantics as Mamba-1
+            # (D is also unsqueezed, but for more straightforward broadcast internally)
+            data_torch = data_torch.reshape((*data_torch.shape, 1))
+        elif self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_NORM, bid):
+            d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
+            d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
+            n_group = self.hparams.get("n_groups", 1)
+            data_torch = data_torch.reshape((n_group, d_inner // n_group))
+
+        if name.endswith(".A_log"):
+            logger.debug("A_log --> A ==> " + new_name)
+            data_torch = -torch.exp(data_torch)
+
+        yield (new_name, data_torch)
+
+
 @ModelBase.register("CohereForCausalLM")
 class CommandR2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.COMMAND_R
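Two details in the hunk above are easy to misread: the vocab size is rounded up to the next multiple of pad_vocab with a negated floor division, and the checkpoint's A_log tensor is converted to A = -exp(A_log) so the runtime gets A directly. A short worked sketch, using made-up example values rather than numbers from any particular model:

import torch

# Ceiling division trick: -(a // -b) == ceil(a / b)
vocab_size, pad_vocab = 50277, 16
padded = -(vocab_size // -pad_vocab) * pad_vocab
assert padded == 50288  # next multiple of 16 at or above 50277

# The converter rewrites A_log into A = -exp(A_log)
A_log = torch.zeros(8)   # hypothetical tensor, just for shape
A = -torch.exp(A_log)    # every entry becomes -1.0 in this toy case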
@@ -6569,12 +6671,20 @@ def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> st
     # maybe we should fallback to text model's arch in that case, since not many models have both
     text_config = hparams.get("text_config", {})
     vision_config = hparams.get("vision_config", {})
-    arch = hparams["architectures"][0]
+    arch = None
+    if (arches := hparams.get("architectures")) is not None and len(arches) > 0:
+        arch = arches[0]
+    elif "ssm_cfg" in hparams:
+        # For non-hf Mamba and Mamba2 models
+        arch = hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"
+
     # if "architectures" is found in the sub-config, use that instead
     if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
         arch = text_config["architectures"][0]
     elif model_type == ModelType.MMPROJ and vision_config.get("architectures") is not None:
         arch = vision_config["architectures"][0]
+    if arch is None:
+        raise ValueError("Failed to detect model architecture")
     return arch

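With this last hunk, configs that lack an "architectures" list (the original non-HF Mamba releases) are identified from their "ssm_cfg" block: the "layer" field ("Mamba" or "Mamba2") is suffixed with "ForCausalLM" so it matches the names registered via ModelBase.register. A rough sketch of the resulting behaviour as a standalone function (this is not the converter's actual helper, and it omits the text/vision sub-config handling):

def detect_arch(hparams: dict) -> str:
    # Mirrors the fallback logic added in the hunk above
    arches = hparams.get("architectures")
    if arches:
        return arches[0]
    if "ssm_cfg" in hparams:
        # non-HF Mamba configs carry an "ssm_cfg" block; "layer" defaults to "Mamba"
        return hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"
    raise ValueError("Failed to detect model architecture")

assert detect_arch({"ssm_cfg": {"layer": "Mamba2"}}) == "Mamba2ForCausalLM"
assert detect_arch({"ssm_cfg": {}}) == "MambaForCausalLM"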