We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent fbe57ae commit 79e2a37Copy full SHA for 79e2a37
convert_hf_to_gguf.py
@@ -6692,6 +6692,16 @@ def prepare_tensors(self):
6692
class SmolLM3Model(LlamaModel):
    """SmolLM3 converter: identical to Llama except for the architecture tag
    and a patched chat template."""

    model_arch = gguf.MODEL_ARCH.SMOLLM3

    def set_vocab(self):
        """Write the vocabulary, then re-emit the chat template with the
        unsupported ``[:]`` array-slicing construct stripped out.

        ref: https://huggingface.co/ggml-org/SmolLM3-3B-GGUF/discussions/1
        """
        super().set_vocab()
        # Local import keeps the heavyweight transformers dependency lazy.
        from transformers import AutoTokenizer
        tok = AutoTokenizer.from_pretrained(self.dir_model)
        template = tok.chat_template
        if template is not None:
            # Overwrite the template the parent wrote with the patched one.
            self.gguf_writer.add_chat_template(template.replace("[:]", ""))
6705
###### CONVERSION LOGIC ######
6706
6707
0 commit comments