Skip to content

Commit 71d8bd6

Browse files
committed
Added support for the snowflake-arctic model.
1 parent c4ec9c0 commit 71d8bd6

File tree

4 files changed

+447
-3
lines changed

4 files changed

+447
-3
lines changed

convert-hf-to-gguf.py

Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1516,6 +1516,119 @@ def write_tensors(self):
15161516
if len(experts) > 0:
15171517
raise ValueError(f"Unprocessed experts: {experts.keys()}")
15181518

1519+
@Model.register("ArcticForCausalLM")
class ArcticModel(Model):
    """Converter for Snowflake Arctic (ArcticForCausalLM) HF checkpoints.

    Arctic is a MoE model: the per-expert weights arrive as separate
    ``model.layers.{bid}.block_sparse_moe.experts.{xid}.w{1,2,3}.weight``
    tensors and are merged here into one stacked 3D tensor per
    (layer, w-index) before being written to GGUF.
    """
    model_arch = gguf.MODEL_ARCH.ARCTIC

    def set_vocab(self):
        # Arctic ships a Llama-style HF tokenizer.
        self._set_vocab_llama_hf()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        # RoPE rotates per attention head, so the rotary dimension is the head size.
        self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])

    # Same as the super class implementation, but permuting q_proj/k_proj
    # and merging the per-expert MoE tensors into stacked 3D tensors.
    def write_tensors(self):
        block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
        n_head = self.hparams.get("num_attention_heads")
        n_kv_head = self.hparams.get("num_key_value_heads")
        n_experts = self.hparams.get("num_local_experts")
        # buffer of not-yet-merged expert tensors, keyed by original HF name
        experts: dict[str, np.ndarray] = {}
        for name, data_torch in self.get_tensors():
            # we don't need these
            if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
                continue

            old_dtype = data_torch.dtype

            # convert any unsupported data types to float32
            if data_torch.dtype not in (torch.float16, torch.float32):
                data_torch = data_torch.to(torch.float32)

            data = data_torch.numpy()

            # undo the HF interleaved RoPE layout for Q/K projections
            if name.endswith("q_proj.weight"):
                data = permute(data, n_head, n_head)
            if name.endswith("k_proj.weight"):
                data = permute(data, n_head, n_kv_head)

            data = data.squeeze()

            # process the experts separately
            if name.find("block_sparse_moe.experts") != -1:
                experts[name] = data
                if len(experts) >= n_experts:
                    # merge the experts into a single 3d tensor
                    for bid in range(block_count):
                        for wid in range(1, 4):
                            # only merge once all n_experts tensors for this
                            # (layer, w-index) have been collected
                            full = True
                            for xid in range(n_experts):
                                ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
                                if ename not in experts:
                                    full = False
                                    break
                            if not full:
                                continue

                            datas = []
                            for xid in range(n_experts):
                                ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
                                datas.append(experts[ename])
                                del experts[ename]

                            data = np.stack(datas, axis=0)
                            data_dtype = data.dtype

                            if self.ftype == 0 and data_dtype == np.float16:
                                data = data.astype(np.float32)

                            if self.ftype == 1 and data_dtype == np.float32:
                                data = data.astype(np.float16)

                            merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight"

                            new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
                            if new_name is None:
                                # report the name that actually failed to map,
                                # not the last-read expert tensor name
                                print(f"Can not map tensor {merged_name!r}")
                                # non-zero status: a failed conversion must not
                                # look like success to the shell
                                sys.exit(1)

                            print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")

                            self.gguf_writer.add_tensor(new_name, data)
                continue

            # map tensor names
            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
            if new_name is None:
                print(f"Can not map tensor {name!r}")
                sys.exit(1)  # non-zero status: conversion failed

            n_dims = len(data.shape)
            data_dtype = data.dtype

            # if f32 desired, convert any float16 to float32
            if self.ftype == 0 and data_dtype == np.float16:
                data = data.astype(np.float32)

            # 1d tensors need to be converted to float32
            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
                data = data.astype(np.float32)

            # if f16 desired, convert any float32 2-dim weight tensors to float16
            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
                data = data.astype(np.float16)

            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")

            self.gguf_writer.add_tensor(new_name, data)

        if len(experts) > 0:
            raise ValueError(f"Unprocessed experts: {experts.keys()}")
15191632

15201633
@Model.register("GrokForCausalLM")
15211634
class GrokModel(Model):

gguf-py/gguf/constants.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,7 @@ class MODEL_ARCH(IntEnum):
138138
COMMAND_R = auto()
139139
DBRX = auto()
140140
OLMO = auto()
141+
ARCTIC = auto()
141142

142143

143144
class MODEL_TENSOR(IntEnum):
@@ -180,6 +181,7 @@ class MODEL_TENSOR(IntEnum):
180181
SSM_A = auto()
181182
SSM_D = auto()
182183
SSM_OUT = auto()
184+
FFN_NORM_EXP = auto()
183185

184186

185187
MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
@@ -215,6 +217,7 @@ class MODEL_TENSOR(IntEnum):
215217
MODEL_ARCH.COMMAND_R: "command-r",
216218
MODEL_ARCH.DBRX: "dbrx",
217219
MODEL_ARCH.OLMO: "olmo",
220+
MODEL_ARCH.ARCTIC: "arctic",
218221
}
219222

220223
TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
@@ -257,6 +260,7 @@ class MODEL_TENSOR(IntEnum):
257260
MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a",
258261
MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d",
259262
MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out",
263+
MODEL_TENSOR.FFN_NORM_EXP: "blk.{bid}.ffn_norm_exps",
260264
}
261265

262266
MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
@@ -725,6 +729,27 @@ class MODEL_TENSOR(IntEnum):
725729
MODEL_TENSOR.FFN_DOWN,
726730
MODEL_TENSOR.FFN_UP,
727731
],
732+
MODEL_ARCH.ARCTIC: [
733+
MODEL_TENSOR.TOKEN_EMBD,
734+
MODEL_TENSOR.OUTPUT_NORM,
735+
MODEL_TENSOR.OUTPUT,
736+
MODEL_TENSOR.ROPE_FREQS,
737+
MODEL_TENSOR.ATTN_NORM,
738+
MODEL_TENSOR.ATTN_Q,
739+
MODEL_TENSOR.ATTN_K,
740+
MODEL_TENSOR.ATTN_V,
741+
MODEL_TENSOR.ATTN_OUT,
742+
MODEL_TENSOR.ATTN_ROT_EMBD,
743+
MODEL_TENSOR.FFN_GATE_INP,
744+
MODEL_TENSOR.FFN_NORM,
745+
MODEL_TENSOR.FFN_GATE,
746+
MODEL_TENSOR.FFN_DOWN,
747+
MODEL_TENSOR.FFN_UP,
748+
MODEL_TENSOR.FFN_GATE_EXP,
749+
MODEL_TENSOR.FFN_DOWN_EXP,
750+
MODEL_TENSOR.FFN_UP_EXP,
751+
MODEL_TENSOR.FFN_NORM_EXP,
752+
],
728753
# TODO
729754
}
730755

gguf-py/gguf/tensor_mapping.py

Lines changed: 64 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -370,6 +370,64 @@ class TensorNameMap:
370370
"model.layers.{bid}.out_proj",
371371
"backbone.layers.{bid}.mixer.out_proj",
372372
),
373+
374+
}
375+
376+
# architecture-specific block mappings
# When an architecture appears here, __init__ uses this table INSTEAD of the
# generic block_mappings_cfg for that architecture (see the arch check in
# __init__), so each entry must list every per-block tensor it needs.
arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
    # Snowflake Arctic: needs its own table because the usual HF names map to
    # different roles here — e.g. FFN_NORM comes from residual_layernorm and
    # post_attention_layernorm feeds the MoE branch (FFN_NORM_EXP).
    MODEL_ARCH.ARCTIC: {
        MODEL_TENSOR.TOKEN_EMBD: (
            "model.embed_tokens",
        ),
        MODEL_TENSOR.OUTPUT_NORM: (
            "model.norm",
        ),
        MODEL_TENSOR.OUTPUT: (
            "lm_head",
        ),
        MODEL_TENSOR.ATTN_NORM: (
            "model.layers.{bid}.input_layernorm",
        ),
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",
        ),
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",
        ),
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",
        ),
        MODEL_TENSOR.ATTN_OUT: (
            "model.layers.{bid}.self_attn.o_proj",
        ),
        MODEL_TENSOR.FFN_GATE_INP: (
            "model.layers.{bid}.block_sparse_moe.gate",
        ),
        # dense residual MLP branch: w1/w3 are gate/up, w2 is down
        # (presumably mirrors the Llama w1/w2/w3 convention — confirm
        # against the HF Arctic modeling code)
        MODEL_TENSOR.FFN_NORM: (
            "model.layers.{bid}.residual_layernorm",
        ),
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.residual_mlp.w1",
        ),
        MODEL_TENSOR.FFN_DOWN: (
            "model.layers.{bid}.residual_mlp.w2",
        ),
        MODEL_TENSOR.FFN_UP: (
            "model.layers.{bid}.residual_mlp.w3",
        ),
        # MoE expert tensors: no "model." prefix — these match the merged
        # "layers.{bid}.feed_forward.experts.w{wid}.weight" names that the
        # convert script synthesizes after stacking the per-expert weights
        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.w1",
        ),
        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.w2",
        ),
        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.w3",
        ),
        MODEL_TENSOR.FFN_NORM_EXP: (
            "model.layers.{bid}.post_attention_layernorm",
        ),
    },
}
374432

375433
mapping: dict[str, tuple[MODEL_TENSOR, str]]
@@ -383,12 +441,16 @@ def __init__(self, arch: MODEL_ARCH, n_blocks: int):
383441
self.mapping[tensor_name] = (tensor, tensor_name)
384442
for key in keys:
385443
self.mapping[key] = (tensor, tensor_name)
444+
if arch in self.arch_block_mappings_cfg:
445+
block_mappings = self.arch_block_mappings_cfg[arch]
446+
else:
447+
block_mappings = self.block_mappings_cfg
386448
for bid in range(n_blocks):
387-
for tensor, keys in self.block_mappings_cfg.items():
449+
for tensor, keys in block_mappings.items():
388450
if tensor not in MODEL_TENSORS[arch]:
389451
continue
390452
# TODO: make this configurable
391-
n_experts = 60
453+
n_experts = 128
392454
for xid in range(n_experts):
393455
tensor_name = TENSOR_NAMES[tensor].format(bid = bid, xid = xid)
394456
self.mapping[tensor_name] = (tensor, tensor_name)

0 commit comments

Comments
 (0)