
Commit 9309ce6

remove .rotary_pos_emb.inv_freq and unused code for the chatglm3 model
Signed-off-by: XingXing Qiao <qiaoxx@dingdao.com>
1 parent: ff7a649

convert-hf-to-gguf.py

Lines changed: 8 additions & 75 deletions
@@ -2700,85 +2700,18 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_rope_dimension_count(64)
         self.gguf_writer.add_add_bos_token(False)
 
-    def write_tensors(self):
-        block_count = self.hparams["num_layers"]
-        tensors = dict(self.get_tensors())
-        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
-        has_lm_head = True
-        n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
-        n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
-
-        for name, data_torch in tensors.items():
-            if name.endswith(".rotary_pos_emb.inv_freq"):
-                continue
-
-            if "lm_head.weight" not in tensors.keys() and "output.weight" not in tensors.keys():
-                has_lm_head = False
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        if name.endswith(".rotary_pos_emb.inv_freq"):
+            return []
 
-            name = re.sub(r'transformer\.', '', name)
+        del bid  # unused
 
-            old_dtype = data_torch.dtype
+        name = re.sub(r'transformer\.', '', name)
 
-            # convert any unsupported data types to float32
-            if data_torch.dtype not in (torch.float16, torch.float32):
-                data_torch = data_torch.to(torch.float32)
+        if name == "word_embeddings.weight":
+            assert self.tensor_names is not None
 
-            data = data_torch.squeeze().numpy()
-
-            if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name):
-                # Map bloom-style qkv_linear to gpt-style qkv_linear
-                # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252  # noqa
-                # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312  # noqa
-                qkv_weights = data.reshape((n_head, 3, n_embed // n_head, n_embed))
-                data = np.concatenate(
-                    (
-                        qkv_weights[:, 0, :, :].reshape((-1, n_embed)),
-                        qkv_weights[:, 1, :, :].reshape((-1, n_embed)),
-                        qkv_weights[:, 2, :, :].reshape((-1, n_embed)),
-                    ),
-                    axis=0,
-                )
-                print("re-format attention.linear_qkv.weight")
-            elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
-                qkv_bias = data.reshape((n_head, 3, n_embed // n_head))
-                data = np.concatenate(
-                    (
-                        qkv_bias[:, 0, :].reshape((n_embed,)),
-                        qkv_bias[:, 1, :].reshape((n_embed,)),
-                        qkv_bias[:, 2, :].reshape((n_embed,)),
-                    ),
-                    axis=0,
-                )
-                print("re-format attention.linear_qkv.bias")
-
-            # map tensor names
-            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
-            if new_name is None:
-                print(f"Can not map tensor {name!r}")
-                sys.exit()
-
-            n_dims = len(data.shape)
-            data_dtype = data.dtype
-
-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)
-
-            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-                data = data.astype(np.float32)
-
-            # if f16 desired, convert any float32 2-dim weight tensors to float16
-            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-                data = data.astype(np.float16)
-
-            print(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
-
-            self.gguf_writer.add_tensor(new_name, data)
-
-            if not has_lm_head and name == "word_embeddings.weight":
-                self.gguf_writer.add_tensor("output.weight", data)
-                print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+        return [(self.map_tensor_name(name), data_torch)]
 
 
 ###### CONVERSION LOGIC ######
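
For context, modify_tensors is the per-tensor hook that the shared conversion loop in convert-hf-to-gguf.py invokes for each checkpoint tensor, so a model class only has to filter or rename tensors instead of re-implementing the whole write_tensors loop (dtype handling, name mapping, and GGUF writing stay in the base class). The following is a minimal, self-contained sketch of that calling pattern; the class name SimpleChatGLMConverter, the convert_all driver, and the pass-through name mapping are assumptions made for illustration, not the real base-class code.

    # Minimal sketch of a modify_tensors-style hook driven by a conversion loop.
    # SimpleChatGLMConverter and convert_all are illustrative names only; the real
    # base class in convert-hf-to-gguf.py also handles dtypes, logging and GGUF output.
    from __future__ import annotations

    import re
    from typing import Iterable

    import torch
    from torch import Tensor


    class SimpleChatGLMConverter:
        def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
            # Drop the precomputed rotary inverse-frequency buffer entirely.
            if name.endswith(".rotary_pos_emb.inv_freq"):
                return []

            del bid  # unused for this model

            # Strip the "transformer." prefix so generic name mapping can apply.
            name = re.sub(r"transformer\.", "", name)
            return [(name, data_torch)]

        def convert_all(self, tensors: dict[str, Tensor]) -> dict[str, Tensor]:
            # Driver loop: every (new_name, tensor) pair the hook yields is kept;
            # tensors for which it yields nothing are skipped.
            out: dict[str, Tensor] = {}
            for name, data in tensors.items():
                for new_name, new_data in self.modify_tensors(data, name, bid=None):
                    out[new_name] = new_data
            return out


    if __name__ == "__main__":
        conv = SimpleChatGLMConverter()
        result = conv.convert_all({
            "transformer.word_embeddings.weight": torch.zeros(4, 8),
            "transformer.rotary_pos_emb.inv_freq": torch.zeros(4),
        })
        print(sorted(result))  # ['word_embeddings.weight']

Running the sketch drops transformer.rotary_pos_emb.inv_freq and keeps word_embeddings.weight under its stripped name, which mirrors the filtering this commit moves into the new hook.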
