
Commit fdc9a8d

Merge remote-tracking branch 'origin/compilade/mamba2' into mamba2-sync
* origin/compilade/mamba2: (29 commits)
  mamba : fix mismatched new and delete size for llm_build_mamba
  cuda : implement ssm scan for Mamba2
  ggml-cpu : reorder SVE FMA for consistency with other SIMD arches
  ggml : fix mamba2 ssm scan when compiled with SVE
  graph : fix recurrent state copies when avoiding copies
  kv-cache : allow context shift for recurrent models
  convert : avoid AutoConfig for Mamba and Mamba2 hparams
  kv-cache : remove const_cast when setting inputs for s_copy
  metal : single-user mamba2 inference works
  metal : add missing args for nb references in ssm_scan_f32_group
  metal : fix confusion between ; and ,
  convert : fix flake8 lint
  ggml : avoid multiply by D in GGML_OP_SSM_SCAN
  ggml : remove unused fast broadcast path in GGML_MUL
  metal : fix wrong number of tokens per sequence in SSM_SCAN
  metal : fix SSM_SCAN state head offset
  metal : add back n_seqs to SSM_SCAN args
  metal : remove unused arguments for SSM_SCAN
  metal : use log and exp instead of log1pf and expf in SSM_SCAN
  metal : fix SSM_SCAN pipeline scope
  ...
2 parents 4367806 + dc1d109 commit fdc9a8d

24 files changed: 1,081 additions, 318 deletions
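For orientation, the selective state-space scan these commits implement advances a small recurrent state per head and reads it out through C. Below is a minimal single-head NumPy sketch of the Mamba-2-style recurrence, assuming a per-head scalar A and leaving out batching, grouping, and the D skip connection (which, per the "avoid multiply by D" commit above, is applied outside the scan op); shapes are illustrative, not the ggml memory layout.

import numpy as np

def ssm_scan_ref(x, dt, A, B, C):
    """Reference scan for one head: x (T, d_head), dt (T,), A scalar < 0,
    B and C (T, d_state). Returns y (T, d_head)."""
    T, d_head = x.shape
    d_state = B.shape[1]
    h = np.zeros((d_head, d_state))                 # recurrent state
    y = np.empty((T, d_head))
    for t in range(T):
        dA = np.exp(dt[t] * A)                      # discretized state decay
        h = dA * h + np.outer(dt[t] * x[t], B[t])   # inject the input
        y[t] = h @ C[t]                             # read out through C
    return y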

convert_hf_to_gguf.py

Lines changed: 111 additions & 1 deletion
@@ -4735,6 +4735,14 @@ def set_gguf_parameters(self):
 class MambaModel(TextModel):
     model_arch = gguf.MODEL_ARCH.MAMBA
 
+    def __init__(self, dir_model: Path, *args, **kwargs):
+        # Avoid using AutoConfig for hparams
+        hparams = kwargs.pop("hparams", None)
+        if hparams is None:
+            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
+                hparams = json.load(f)
+        super().__init__(dir_model, *args, hparams=hparams, **kwargs)
+
     def set_vocab(self):
         vocab_size = self.hparams["vocab_size"]
         # Round vocab size to next multiple of 8
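The constructor above reads config.json directly instead of going through transformers.AutoConfig, which can mis-detect architectures for checkpoints it does not recognize. The same pattern in isolation, as a standalone sketch (the path in the usage comment is hypothetical):

import json
from pathlib import Path

def load_hparams(dir_model: Path) -> dict:
    # Read the checkpoint's config.json as plain JSON, bypassing AutoConfig.
    with open(dir_model / "config.json", "r", encoding="utf-8") as f:
        return json.load(f)

# hparams = load_hparams(Path("models/mamba-130m"))  # hypothetical path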
@@ -4809,6 +4817,100 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         return [(new_name, data_torch)]
 
 
+@ModelBase.register("Mamba2ForCausalLM")
+class Mamba2Model(TextModel):
+    model_arch = gguf.MODEL_ARCH.MAMBA2
+
+    def __init__(self, dir_model: Path, *args, **kwargs):
+        # Avoid using AutoConfig for hparams
+        # It wrongly assumes all Mamba2 models are Mamba-Codestral-7B-v0.1
+        hparams = kwargs.pop("hparams", None)
+        if hparams is None:
+            with open(dir_model / "config.json", "r", encoding="utf-8") as f:
+                hparams = json.load(f)
+        super().__init__(dir_model, *args, hparams=hparams, **kwargs)
+
+    def set_vocab(self):
+        vocab_size = self.hparams["vocab_size"]
+        # Round vocab size to next multiple of 16
+        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 16)
+        # pad using ceiling division
+        # ref: https://stackoverflow.com/a/17511341/22827863
+        vocab_size = -(vocab_size // -pad_vocab) * pad_vocab
+        self.hparams["vocab_size"] = vocab_size
+
+        if (self.dir_model / "tokenizer.model").is_file():
+            self._set_vocab_sentencepiece()
+        elif (self.dir_model / "tokenizer.model.v3").is_file():
+            # mamba-codestral
+            raise NotImplementedError(f"Please rename {self.dir_model / 'tokenizer.model.v3'} to {self.dir_model / 'tokenizer.model'}")
+        elif (self.dir_model / "tokenizer.json").is_file():
+            self._set_vocab_gpt2()
+        else:
+            # Use the GPT-NeoX tokenizer when no tokenizer files are present
+            self._set_vocab_builtin("gpt-neox", vocab_size)
+
+    def set_gguf_parameters(self):
+        d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
+        d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4
+        d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
+        d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 128
+        head_dim = self.find_hparam(["head_dim"], optional=True) or 64
+        n_group = self.find_hparam(["n_groups"], optional=True) or 1
+
+        rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
+
+        # Fail early for models which don't have a block expansion factor of 2
+        # TODO: does this really matter?
+        assert d_inner == 2 * d_model
+        assert d_inner % head_dim == 0
+
+        self.gguf_writer.add_context_length(2**20)  # arbitrary value; for those who use the default
+        self.gguf_writer.add_embedding_length(d_model)
+        self.gguf_writer.add_feed_forward_length(0)  # unused, but seemingly required when loading
+        self.gguf_writer.add_head_count(0)  # unused, but seemingly required when loading
+        self.gguf_writer.add_block_count(self.block_count)
+        self.gguf_writer.add_ssm_conv_kernel(d_conv)
+        self.gguf_writer.add_ssm_inner_size(d_inner)
+        self.gguf_writer.add_ssm_state_size(d_state)
+        self.gguf_writer.add_ssm_time_step_rank(d_inner // head_dim)
+        self.gguf_writer.add_ssm_group_count(n_group)
+        self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
+        self.gguf_writer.add_file_type(self.ftype)
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+
+        if name.startswith("model.backbone") or name.startswith("model.lm_head"):
+            # map Mamba-Codestral-7B-v0.1 tensor names to the names used by Mamba-2
+            name = name.removeprefix("model.")
+
+        if name.endswith(".dt_bias"):
+            name = name.rpartition(".dt_bias")[0] + ".dt_proj.bias"
+
+        new_name = self.map_tensor_name(name)
+
+        if self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_CONV1D, bid):
+            data_torch = data_torch.squeeze()
+        elif any(self.match_model_tensor_name(new_name, t, bid, suffix="") for t in [
+            gguf.MODEL_TENSOR.SSM_A,
+            gguf.MODEL_TENSOR.SSM_D,
+        ]):
+            # unsqueeze A to use similar shape semantics as Mamba-1
+            # (D is also unsqueezed, but for more straightforward broadcast internally)
+            data_torch = data_torch.reshape((*data_torch.shape, 1))
+        elif self.match_model_tensor_name(new_name, gguf.MODEL_TENSOR.SSM_NORM, bid):
+            d_model = self.find_hparam(["hidden_size", "d_model", "dim"])
+            d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model
+            n_group = self.hparams.get("n_groups", 1)
+            data_torch = data_torch.reshape((n_group, d_inner // n_group))
+
+        if name.endswith(".A_log"):
+            logger.debug("A_log --> A ==> " + new_name)
+            data_torch = -torch.exp(data_torch)
+
+        yield (new_name, data_torch)
+
+
 @ModelBase.register("CohereForCausalLM")
 class CommandR2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.COMMAND_R
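Two small numeric details in Mamba2Model above are worth a worked check: the vocab padding relies on the ceiling-division identity ceil(v / p) == -(v // -p), and modify_tensors rewrites the stored A_log as A = -torch.exp(A_log), so the scan's per-step decay exp(dt * A) stays in (0, 1). The values below are illustrative:

import torch

# Vocab padding: next multiple of 16 at or above an illustrative raw size.
vocab_size, pad_vocab = 50277, 16
padded = -(vocab_size // -pad_vocab) * pad_vocab
assert padded == 50288 and padded % pad_vocab == 0

# A_log -> A: strictly negative A yields a decay factor in (0, 1).
A_log = torch.tensor([0.0, 1.0, 2.0])   # toy values
A = -torch.exp(A_log)
decay = torch.exp(0.5 * A)              # dt = 0.5, illustrative
assert torch.all(A < 0) and torch.all((decay > 0) & (decay < 1))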
@@ -6569,12 +6671,20 @@ def get_model_architecture(hparams: dict[str, Any], model_type: ModelType) -> st
     # maybe we should fallback to text model's arch in that case, since not many models have both
     text_config = hparams.get("text_config", {})
     vision_config = hparams.get("vision_config", {})
-    arch = hparams["architectures"][0]
+    arch = None
+    if (arches := hparams.get("architectures")) is not None and len(arches) > 0:
+        arch = arches[0]
+    elif "ssm_cfg" in hparams:
+        # For non-hf Mamba and Mamba2 models
+        arch = hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"
+
     # if "architectures" is found in the sub-config, use that instead
     if model_type == ModelType.TEXT and text_config.get("architectures") is not None:
         arch = text_config["architectures"][0]
     elif model_type == ModelType.MMPROJ and vision_config.get("architectures") is not None:
         arch = vision_config["architectures"][0]
+    if arch is None:
+        raise ValueError("Failed to detect model architecture")
     return arch
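Run against toy configs, the new fallback resolves both HF-style and non-HF Mamba checkpoints; the dict contents below are made up for illustration:

for hparams in (
    {"architectures": ["MambaForCausalLM"]},   # HF-style config
    {"ssm_cfg": {"layer": "Mamba2"}},          # non-HF Mamba config
):
    arch = None
    if (arches := hparams.get("architectures")) is not None and len(arches) > 0:
        arch = arches[0]
    elif "ssm_cfg" in hparams:
        arch = hparams["ssm_cfg"].get("layer", "Mamba") + "ForCausalLM"
    print(arch)  # -> MambaForCausalLM, then Mamba2ForCausalLM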
ggml/include/ggml.h

Lines changed: 2 additions & 1 deletion
@@ -1911,7 +1911,8 @@ extern "C" {
             struct ggml_tensor  * dt,
             struct ggml_tensor  * A,
             struct ggml_tensor  * B,
-            struct ggml_tensor  * C);
+            struct ggml_tensor  * C,
+            struct ggml_tensor  * ids);
 
     // partition into non-overlapping windows with padding if needed
     // example:
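The new ids argument to ggml_ssm_scan is an index tensor; judging by the "fix recurrent state copies when avoiding copies" and kv-cache commits above, it plausibly lets the scan select which cached recurrent state each sequence reads instead of copying states beforehand. A rough NumPy illustration of that gather idea, purely an assumption about the semantics:

import numpy as np

# Assumed layout: one recurrent state per cache slot.
states = np.zeros((8, 16, 128))   # (n_slots, d_inner, d_state), illustrative
ids = np.array([3, 0, 5])         # slot used by each sequence in the batch
batch_states = states[ids]        # gather by index instead of copying up front
assert batch_states.shape == (3, 16, 128)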
