Skip to content

Commit 31fca16

Browse files
committed
*.py: Convert logger.error() + sys.exit() into raising an exception (for atypical errors)
1 parent f45ef6c commit 31fca16

File tree

2 files changed

+30
-57
lines changed

2 files changed

+30
-57
lines changed

convert-hf-to-gguf.py

Lines changed: 28 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -153,8 +153,7 @@ def write_tensors(self):
153153
# map tensor names
154154
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
155155
if new_name is None:
156-
logger.error(f"Can not map tensor {name!r}")
157-
sys.exit()
156+
raise ValueError(f"Can not map tensor {name!r}")
158157

159158
n_dims = len(data.shape)
160159
data_dtype = data.dtype
@@ -486,8 +485,7 @@ def write_tensors(self):
486485
# map tensor names
487486
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
488487
if new_name is None:
489-
logger.error(f"Can not map tensor {name!r}")
490-
sys.exit()
488+
raise ValueError(f"Can not map tensor {name!r}")
491489

492490
n_dims = len(data.shape)
493491
data_dtype = data.dtype
@@ -570,8 +568,7 @@ def write_tensors(self):
570568
else:
571569
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
572570
if new_name is None:
573-
logger.error(f"Can not map tensor {name!r}")
574-
sys.exit()
571+
raise ValueError(f"Can not map tensor {name!r}")
575572

576573
n_dims = len(data.shape)
577574
data_dtype = data.dtype
@@ -614,8 +611,7 @@ def set_gguf_parameters(self):
614611
elif "model_max_length" in self.hparams:
615612
ctx_length = self.hparams["model_max_length"]
616613
else:
617-
logger.error("gguf: can not find ctx length parameter.")
618-
sys.exit()
614+
raise ValueError("gguf: can not find ctx length parameter.")
619615

620616
self.gguf_writer.add_file_type(self.ftype)
621617
self.gguf_writer.add_name(self.dir_model.name)
@@ -653,8 +649,7 @@ def write_tensors(self):
653649
# map tensor names
654650
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
655651
if new_name is None:
656-
logger.error(f"Can not map tensor {name!r}")
657-
sys.exit()
652+
raise ValueError(f"Can not map tensor {name!r}")
658653

659654
n_dims = len(data.shape)
660655
data_dtype = data.dtype
@@ -696,8 +691,7 @@ def set_gguf_parameters(self):
696691
elif "model_max_length" in self.hparams:
697692
ctx_length = self.hparams["model_max_length"]
698693
else:
699-
logger.error("gguf: can not find ctx length parameter.")
700-
sys.exit()
694+
raise ValueError("gguf: can not find ctx length parameter.")
701695

702696
self.gguf_writer.add_name(self.dir_model.name)
703697
self.gguf_writer.add_source_hf_repo(hf_repo)
@@ -751,8 +745,7 @@ def write_tensors(self):
751745
# map tensor names
752746
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
753747
if new_name is None:
754-
logger.error(f"Can not map tensor {name!r}")
755-
sys.exit()
748+
raise ValueError(f"Can not map tensor {name!r}")
756749

757750
n_dims = len(data.shape)
758751
data_dtype = data.dtype
@@ -853,8 +846,7 @@ def set_gguf_parameters(self):
853846
elif "model_max_length" in self.hparams:
854847
ctx_length = self.hparams["model_max_length"]
855848
else:
856-
logger.error("gguf: can not find ctx length parameter.")
857-
sys.exit()
849+
raise ValueError("gguf: can not find ctx length parameter.")
858850

859851
self.gguf_writer.add_name(self.dir_model.name)
860852
self.gguf_writer.add_source_hf_repo(hf_repo)
@@ -903,8 +895,7 @@ def write_tensors(self):
903895
# map tensor names
904896
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
905897
if new_name is None:
906-
logger.error(f"Can not map tensor {name!r}")
907-
sys.exit()
898+
raise ValueError(f"Can not map tensor {name!r}")
908899

909900
n_dims = len(data.shape)
910901
data_dtype = data.dtype
@@ -1008,8 +999,7 @@ def write_tensors(self):
1008999
# map tensor names
10091000
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
10101001
if new_name is None:
1011-
logger.error(f"Can not map tensor {name!r}")
1012-
sys.exit()
1002+
raise ValueError(f"Can not map tensor {name!r}")
10131003

10141004
n_dims = len(data.shape)
10151005
data_dtype = data.dtype
@@ -1111,10 +1101,9 @@ def write_tensors(self):
11111101
data = data_torch.squeeze().numpy()
11121102

11131103
# map tensor names
1114-
new_name = tensor_map.get_name(name, try_suffixes=(".weight",))
1104+
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
11151105
if new_name is None:
1116-
logger.error(f"Can not map tensor {name!r}")
1117-
sys.exit()
1106+
raise ValueError(f"Can not map tensor {name!r}")
11181107

11191108
n_dims = len(data.shape)
11201109
data_dtype = data.dtype
@@ -1180,8 +1169,7 @@ def write_tensors(self):
11801169
data = data_torch.to(torch.float32).squeeze().numpy()
11811170
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
11821171
if new_name is None:
1183-
logger.error(f"Can not map tensor {name!r}")
1184-
sys.exit()
1172+
raise ValueError(f"Can not map tensor {name!r}")
11851173
n_dims = len(data.shape)
11861174
logger.info(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
11871175
self.gguf_writer.add_tensor(new_name, data)
@@ -1383,8 +1371,7 @@ def write_tensors(self):
13831371

13841372
new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
13851373
if new_name is None:
1386-
logger.error(f"Can not map tensor {name!r}")
1387-
sys.exit()
1374+
raise ValueError(f"Can not map tensor {name!r}")
13881375

13891376
logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
13901377

@@ -1394,8 +1381,7 @@ def write_tensors(self):
13941381
# map tensor names
13951382
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
13961383
if new_name is None:
1397-
logger.error(f"Can not map tensor {name!r}")
1398-
sys.exit()
1384+
raise ValueError(f"Can not map tensor {name!r}")
13991385

14001386
n_dims = len(data.shape)
14011387
data_dtype = data.dtype
@@ -1487,8 +1473,7 @@ def write_tensors(self):
14871473

14881474
new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
14891475
if new_name is None:
1490-
logger.error(f"Can not map tensor {name!r}")
1491-
sys.exit()
1476+
raise ValueError(f"Can not map tensor {name!r}")
14921477

14931478
logger.info(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
14941479

@@ -1498,8 +1483,7 @@ def write_tensors(self):
14981483
# map tensor names
14991484
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
15001485
if new_name is None:
1501-
logger.error(f"Can not map tensor {name!r}")
1502-
sys.exit()
1486+
raise ValueError(f"Can not map tensor {name!r}")
15031487

15041488
n_dims = len(data.shape)
15051489
data_dtype = data.dtype
@@ -1592,17 +1576,15 @@ def write_tensors(self):
15921576
# https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15
15931577
new_name = tensor_map.get_name(name if not experts else name + ".weight", try_suffixes=(".weight",))
15941578
if new_name is None:
1595-
logger.error(f"Can not map tensor {name!r}")
1596-
sys.exit()
1579+
raise ValueError(f"Can not map tensor {name!r}")
15971580

15981581
n_dims = len(data.shape)
15991582
data_dtype = data.dtype
16001583

16011584
# Most of the codebase that takes in 1D tensors only handles F32 tensors
16021585
# and most of the outputs tensors are F32.
16031586
if data_dtype != np.float32 and n_dims == 1:
1604-
logger.error(f"Can not map tensor {name!r}: all 1D tensors must be F32")
1605-
sys.exit()
1587+
raise ValueError(f"Can not map tensor {name!r}: all 1D tensors must be F32")
16061588

16071589
# if f32 desired, convert any float16 to float32
16081590
if self.ftype == 0 and data_dtype == np.float16:
@@ -1674,8 +1656,7 @@ def write_tensors(self):
16741656
# map tensor names
16751657
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
16761658
if new_name is None:
1677-
logger.error(f"Can not map tensor {name!r}")
1678-
sys.exit()
1659+
raise ValueError(f"Can not map tensor {name!r}")
16791660

16801661
n_dims = len(data.shape)
16811662
data_dtype = data.dtype
@@ -1758,8 +1739,7 @@ def write_tensors(self):
17581739
# map tensor names
17591740
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
17601741
if new_name is None:
1761-
logger.error(f"Can not map tensor {name!r}")
1762-
sys.exit()
1742+
raise ValueError(f"Can not map tensor {name!r}")
17631743

17641744
n_dims = len(data.shape)
17651745
data_dtype = data.dtype
@@ -1921,8 +1901,7 @@ def write_tensors(self):
19211901
# map tensor names
19221902
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
19231903
if new_name is None:
1924-
logger.error(f"Can not map tensor {name!r}")
1925-
sys.exit()
1904+
raise ValueError(f"Can not map tensor {name!r}")
19261905

19271906
n_dims = len(data.shape)
19281907
data_dtype = data.dtype
@@ -2019,8 +1998,7 @@ def write_tensors(self):
20191998
# map tensor names
20201999
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
20212000
if new_name is None:
2022-
logger.error(f"Can not map tensor {name!r}")
2023-
sys.exit()
2001+
raise ValueError(f"Can not map tensor {name!r}")
20242002

20252003
# shuffle for broadcasting of gqa in ggml_mul_mat
20262004
if new_name.endswith("attn_q.weight"):
@@ -2097,8 +2075,7 @@ def write_tensors(self):
20972075
# map tensor names
20982076
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
20992077
if new_name is None:
2100-
logger.error(f"Can not map tensor {name!r}")
2101-
sys.exit()
2078+
raise ValueError(f"Can not map tensor {name!r}")
21022079

21032080
n_dims = len(data.shape)
21042081
data_dtype = data.dtype
@@ -2243,8 +2220,7 @@ def post_write_tensors(self, tensor_map, name, data_torch):
22432220
# map tensor names
22442221
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
22452222
if new_name is None:
2246-
logger.error(f"Can not map tensor {name!r}")
2247-
sys.exit()
2223+
raise ValueError(f"Can not map tensor {name!r}")
22482224

22492225
n_dims = len(data.shape)
22502226
data_dtype = data.dtype
@@ -2371,8 +2347,7 @@ def write_tensors(self):
23712347
# map tensor names
23722348
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
23732349
if new_name is None:
2374-
logger.error(f"Can not map tensor {name!r}")
2375-
sys.exit()
2350+
raise ValueError(f"Can not map tensor {name!r}")
23762351

23772352
data = data_torch.squeeze().numpy()
23782353
n_dims = len(data.shape)
@@ -2481,8 +2456,7 @@ def write_tensors(self):
24812456
# map tensor names
24822457
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
24832458
if new_name is None:
2484-
logger.error(f"Can not map tensor {name!r}")
2485-
sys.exit()
2459+
raise ValueError(f"Can not map tensor {name!r}")
24862460

24872461
n_dims = len(data.shape)
24882462
data_dtype = data.dtype
@@ -2584,8 +2558,7 @@ def write_tensors(self):
25842558
# map tensor names
25852559
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
25862560
if new_name is None:
2587-
logger.error(f"Can not map tensor {name!r}")
2588-
sys.exit()
2561+
raise ValueError(f"Can not map tensor {name!r}")
25892562

25902563
if name.endswith(".A_log"):
25912564
logger.debug("A_log --> A ==> " + new_name)

convert-persimmon-to-gguf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,8 +121,8 @@ def main():
121121
data = data_torch.to(torch.float32).squeeze().numpy()
122122
new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
123123
if new_name is None:
124-
logger.error(f"Can not map tensor '{name}'")
125-
sys.exit()
124+
raise ValueError(f"Can not map tensor '{name}'")
125+
126126
n_dims = len(data.shape)
127127
logger.debug(f"{new_name}, n_dims = {str(n_dims)}, {str(old_dtype)} --> {str(data.dtype)}")
128128
gguf_writer.add_tensor(new_name, data)

0 commit comments

Comments
 (0)