Commit d143140

bugfix: an omitted comma caused convert-hf-to-gguf.py to fail

Signed-off-by: XingXing Qiao <qiaoxx@dingdao.com>

1 parent d6dac85 commit d143140

File tree

1 file changed (+4, -4 lines)

gguf-py/gguf/tensor_mapping.py

Lines changed: 4 additions & 4 deletions
@@ -122,7 +122,7 @@ class TensorNameMap:
             "h.{bid}.attn.c_attn",                                  # gpt2
             "transformer.h.{bid}.mixer.Wqkv",                       # phi2
             "encoder.layers.{bid}.attn.Wqkv",                       # nomic-bert
-            "model.layers.{bid}.self_attn.qkv_proj"                 # phi3
+            "model.layers.{bid}.self_attn.qkv_proj",                # phi3
             "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
         ),

@@ -134,7 +134,7 @@ class TensorNameMap:
             "transformer.h.{bid}.attn.q_proj",                             # gpt-j
             "model.layers.layers.{bid}.self_attn.q_proj",                  # plamo
             "model.layers.{bid}.attention.wq",                             # internlm2
-            "transformer.decoder_layer.{bid}.multi_head_attention.query"   # Grok
+            "transformer.decoder_layer.{bid}.multi_head_attention.query",  # Grok
         ),

         # Attention key
@@ -145,7 +145,7 @@ class TensorNameMap:
             "transformer.h.{bid}.attn.k_proj",                           # gpt-j
             "model.layers.layers.{bid}.self_attn.k_proj",                # plamo
             "model.layers.{bid}.attention.wk",                           # internlm2
-            "transformer.decoder_layer.{bid}.multi_head_attention.key"   # Grok
+            "transformer.decoder_layer.{bid}.multi_head_attention.key",  # Grok
         ),

         # Attention value
@@ -156,7 +156,7 @@ class TensorNameMap:
             "transformer.h.{bid}.attn.v_proj",                             # gpt-j
             "model.layers.layers.{bid}.self_attn.v_proj",                  # plamo
             "model.layers.{bid}.attention.wv",                             # internlm2
-            "transformer.decoder_layer.{bid}.multi_head_attention.value"   # Grok
+            "transformer.decoder_layer.{bid}.multi_head_attention.value",  # Grok
         ),

         # Attention output
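
For context (an editorial note, not part of the commit): Python implicitly concatenates adjacent string literals, so the comma omitted after the phi3 entry fused it with the chatglm entry appended on the next line, producing a single nonsense tensor name that matched neither architecture and made convert-hf-to-gguf.py fail. A minimal standalone sketch of the pitfall, using the two entries from the first hunk:

    # Adjacent string literals are implicitly concatenated, so a missing
    # comma between two tuple entries silently fuses them into one.
    broken = (
        "model.layers.{bid}.self_attn.qkv_proj"                 # phi3 -- comma omitted
        "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
    )

    fixed = (
        "model.layers.{bid}.self_attn.qkv_proj",                # phi3
        "encoder.layers.{bid}.self_attention.query_key_value",  # chatglm
    )

    print(len(broken))  # 1 -- one fused name; matches neither phi3 nor chatglm
    print(len(fixed))   # 2 -- one mapping entry per architecture

Note that the three Grok entries were each the last element of their tuple, so their missing trailing commas were only latent hazards; the phi3 entry, with the chatglm entry appended after it, is the one that actually broke. The commit adds all four commas for consistency.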
