
Commit 3173a62

stdout : vertical align outputs for better readability
1 parent 489537e commit 3173a62

2 files changed: +10 -9 lines changed

convert.py

Lines changed: 3 additions & 2 deletions
@@ -951,8 +951,9 @@ def do_item(item: Tuple[str, LazyTensor]) -> NDArray:
 
     ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
     for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
-        size = ' x '.join(map(str, lazy_tensor.shape))
-        print(f"[{i+1}/{len(model)}] Writing tensor {name}, size {size}...")
+        size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
+        padi = len(str(len(model)))
+        print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
     of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
     ndarray.tofile(of.fout)
 of.fout.close()
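
The Python side derives the counter's pad width at run time (padi = len(str(len(model)))) and feeds it into a nested format spec, f"{i+1:{padi}d}", so the index column stays aligned for any tensor count. C's printf can express the same idea with a dynamic field width; a minimal sketch of that trick, not part of this commit, with a made-up tensor count:

// Sketch only: the C equivalent of convert.py's run-time pad width.
// `total` is a made-up tensor count for illustration.
#include <cstdio>

int main() {
    const size_t total = 291;
    // snprintf(nullptr, 0, ...) returns the formatted length,
    // i.e. the number of digits in `total`.
    const int padi = snprintf(nullptr, 0, "%zu", total);
    for (size_t idx = 1; idx <= total; idx++) {
        // '*' consumes `padi` as the field width, right-aligning the index
        printf("[%*zu/%zu] ...\n", padi, idx, total);
    }
    return 0;
}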

llama.cpp

Lines changed: 7 additions & 7 deletions
@@ -262,12 +262,12 @@ static size_t checked_div(size_t a, size_t b) {
 }
 
 static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
-    std::string ret = "[" + std::to_string(ne.at(0));
+    char buf[256];
+    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
     for (size_t i = 1; i < ne.size(); i++) {
-        ret += " x " + std::to_string(ne.at(i));
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
     }
-    ret += "]";
-    return ret;
+    return buf;
 }
 
 static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
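
The rewritten helper right-aligns each dimension to five columns and, unlike the old version, no longer wraps the result in brackets; the caller then right-aligns the whole string (see the %16s in the quantize hunk below). A self-contained sketch to exercise it, with invented shapes:

// Standalone sketch of the rewritten shape formatter above;
// the example shapes are invented for illustration.
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

static std::string format_tensor_shape(const std::vector<uint32_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
    for (size_t i = 1; i < ne.size(); i++) {
        // keep appending at the end of buf, shrinking the remaining budget
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
    }
    return buf; // implicit conversion to std::string
}

int main() {
    const std::vector<std::vector<uint32_t>> shapes = {
        {4096, 32000}, {4096}, {128, 4096}};
    for (const auto & ne : shapes) {
        // %16s mirrors how the quantize progress line pads the shape column
        printf("|%16s|\n", format_tensor_shape(ne).c_str());
    }
    return 0;
}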
@@ -942,8 +942,8 @@ static void llama_model_load_internal(
     ml->ggml_ctx = ctx;
 
     model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab});
-    model.norm = ml->get_tensor("norm.weight", {n_embd});
-    model.output = ml->get_tensor("output.weight", {n_embd, n_vocab});
+    model.norm           = ml->get_tensor("norm.weight",           {n_embd});
+    model.output         = ml->get_tensor("output.weight",         {n_embd, n_vocab});
 
     model.layers.resize(n_layer);
     for (uint32_t i = 0; i < n_layer; ++i) {
@@ -1570,7 +1570,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         tensor.data = read_data.addr;
         model_loader->load_data_for(tensor);
 
-        printf("[%zu/%zu] %36s - %s, type = %6s, ",
+        printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
                ++idx, model_loader->tensors_map.tensors.size(),
                tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
                ggml_type_name(tensor.type));
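
With the padded counter and the fixed-width shape column, every field lands in its own column regardless of the tensor. A toy demonstration of the new format string; the names, sizes, counts, and types below are all invented:

// Toy demo of the widened format string; every value here is made up.
#include <cstdio>

int main() {
    printf("[%4zu/%4zu] %36s - %16s, type = %6s\n",
           (size_t) 1, (size_t) 291, "tok_embeddings.weight", " 4096 x 32000", "q4_0");
    printf("[%4zu/%4zu] %36s - %16s, type = %6s\n",
           (size_t) 2, (size_t) 291, "norm.weight", " 4096", "f32");
    return 0;
}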
