Skip to content

Commit 6bda7ce

Browse files
authored
llama : add pre-tokenizer regexes for BLOOM and gpt3-finnish (#8850)
1 parent d5492f0 commit 6bda7ce

File tree

5 files changed

+19
-1
lines changed

5 files changed

+19
-1
lines changed

convert_hf_to_gguf.py

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -590,6 +590,12 @@ def get_vocab_base_pre(self, tokenizer) -> str:
590590
if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249":
591591
# ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M
592592
res = "smollm"
593+
if chkhsh == "3c30d3ad1d6b64202cd222813e7736c2db6e1bd6d67197090fc1211fbc612ae7":
594+
# ref: https://huggingface.co/bigscience/bloom
595+
res = "bloom"
596+
if chkhsh == "bc01ce58980e1db43859146dc51b1758b3b88729b217a74792e9f8d43e479d21":
597+
# ref: https://huggingface.co/TurkuNLP/gpt3-finnish-small
598+
res = "gpt3-finnish"
593599

594600
if res is None:
595601
logger.warning("\n")
@@ -893,7 +899,7 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
893899
return tensors
894900

895901

896-
@Model.register("BloomForCausalLM")
902+
@Model.register("BloomForCausalLM", "BloomModel")
897903
class BloomModel(Model):
898904
model_arch = gguf.MODEL_ARCH.BLOOM
899905

convert_hf_to_gguf_update.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -94,6 +94,8 @@ class TOKENIZER_TYPE(IntEnum):
9494
{"name": "codeshell", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/WisdomShell/CodeShell-7B", },
9595
{"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", },
9696
{"name": "smollm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", },
97+
{'name': "bloom", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigscience/bloom", },
98+
{'name': "gpt3-finnish", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/TurkuNLP/gpt3-finnish-small", },
9799
]
98100

99101

include/llama.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -93,6 +93,8 @@ extern "C" {
9393
LLAMA_VOCAB_PRE_TYPE_TEKKEN = 20,
9494
LLAMA_VOCAB_PRE_TYPE_SMOLLM = 21,
9595
LLAMA_VOCAB_PRE_TYPE_CODESHELL = 22,
96+
LLAMA_VOCAB_PRE_TYPE_BLOOM = 23,
97+
LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24,
9698
};
9799

98100
enum llama_rope_type {

src/llama-vocab.cpp

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -410,6 +410,8 @@ struct llm_tokenizer_bpe {
410410
};
411411
break;
412412
case LLAMA_VOCAB_PRE_TYPE_PORO:
413+
case LLAMA_VOCAB_PRE_TYPE_BLOOM:
414+
case LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH:
413415
regex_exprs = {
414416
" ?[^(\\s|.,!?…。,、।۔،)]+",
415417
};

src/llama.cpp

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -5467,6 +5467,12 @@ static void llm_load_vocab(
54675467
} else if (
54685468
tokenizer_pre == "codeshell") {
54695469
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
5470+
} else if (
5471+
tokenizer_pre == "bloom") {
5472+
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
5473+
} else if (
5474+
tokenizer_pre == "gpt3-finnish") {
5475+
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
54705476
} else {
54715477
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
54725478
}

0 commit comments

Comments (0)