
Commit 3dd654c

mamba : fix vocab size problems with official models
The perplexity was waaaay too high for models with a non-round vocab size. Not sure why, but it needed to be fixed in the metadata. Note that this breaks existing GGUF-converted Mamba models, but **only if** the vocab size was not already rounded.
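As a quick illustration of the rounding this commit moves into the metadata, here is a standalone Python sketch; the 50277 → 50280 figures are an assumed example (a GPT-NeoX-style vocab as used by the official Mamba checkpoints, padded to a multiple of 8), not values taken from the diff:

    # Round a vocab size up to the next multiple of `pad` (sketch only, not the converter code).
    def round_vocab_size(vocab_size: int, pad: int = 8) -> int:
        return ((vocab_size + (pad - 1)) // pad) * pad

    print(round_vocab_size(50277))  # 50280 -- a non-round vocab gets padded up
    print(round_vocab_size(50280))  # 50280 -- an already-rounded vocab is unchanged

The second case is why the breakage noted above only affects GGUF conversions whose stored vocab size was not already rounded.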
1 parent: 5a8961a

File tree

2 files changed: +9 -4 lines changed


convert-hf-to-gguf.py

Lines changed: 7 additions & 0 deletions
@@ -1768,6 +1768,13 @@ def get_tensors(self):
 
 
 class MambaModel(Model):
+    def set_vocab(self):
+        vocab_size = self.hparams["vocab_size"]
+        # Round vocab size to next multiple of 8
+        pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8)
+        self.hparams["vocab_size"] = ((vocab_size + (pad_vocab - 1)) // pad_vocab) * pad_vocab
+        return self._set_vocab_gpt2()
+
     def set_gguf_parameters(self):
         d_model = self.hparams["d_model"]
         d_inner = self.hparams.get("d_inner", 2 * d_model)
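Since set_vocab() enlarges hparams["vocab_size"] before the vocabulary is built, the token list written to the GGUF has to cover the padded ids as well. A hedged sketch of that idea (illustrative only; the placeholder naming is an assumption, not the actual _set_vocab_gpt2 behavior):

    # Extend a token list to the rounded size with placeholder entries so that
    # token ids and embedding rows line up (illustrative sketch, not converter code).
    def pad_token_list(tokens: list[str], padded_size: int) -> list[str]:
        return tokens + [f"[PAD{i}]" for i in range(len(tokens), padded_size)]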

llama.cpp

Lines changed: 2 additions & 4 deletions
@@ -4426,15 +4426,13 @@ static bool llm_load_tensors(
             // FIXME: ceiling instead of floor
             const int64_t dt_rank = n_embd / 16;
             GGML_ASSERT(2 * n_embd == d_inner);
-            // round up the vocab size to the next multiple of 8
-            const int64_t rounded_vocab = (n_vocab + 7) & -8;
 
-            model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, rounded_vocab});
+            model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
 
             // output
             {
                 model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-                model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, rounded_vocab});
+                model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
             }
 
             for (int i = 0; i < n_layer; ++i) {
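The deleted bit trick, (n_vocab + 7) & -8, performs the same round-up-to-a-multiple-of-8 that the converter now writes into the metadata, which is why the loader can size the token embedding and output tensors with n_vocab directly. A standalone Python check of that equivalence (not part of the commit):

    # (n + 7) & -8 clears the low three bits of n + 7, i.e. it rounds a
    # non-negative n up to the next multiple of 8 -- same as the division form.
    for n in (50254, 50277, 50280):
        assert (n + 7) & -8 == ((n + (8 - 1)) // 8) * 8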
