Commit 7a20c4b

restore pertinent logs

1 parent 22ab376 commit 7a20c4b
llama.cpp

Lines changed: 12 additions & 1 deletion
@@ -3214,7 +3214,6 @@ struct llama_model_loader {
 
         // determine file type based on the number of tensors for each quantization and print meta data
         // TODO: make optional
-        if(false) //disable this log for now
         {
             std::map<enum ggml_type, uint32_t> n_type;
 
@@ -4892,9 +4891,21 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
     }
     if (ml.n_bytes < GiB) {
+        LLAMA_LOG_INFO("%s: model size = %.2f Bytes (%.2f BPW) \n", __func__, ml.n_bytes/1.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f KiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0, ml.n_bytes*8.0/ml.n_elements);
         LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f KB (%.2f BPW) \n", __func__, ml.n_bytes/1000.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f MB (%.2f BPW) \n", __func__, ml.n_bytes/1000.0/1000.0 , ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f GB (%.2f BPW) \n", __func__, ml.n_bytes/1000.0/1000.0/1000.0, ml.n_bytes*8.0/ml.n_elements);
     } else {
+        LLAMA_LOG_INFO("%s: model size = %.2f Bytes (%.2f BPW) \n", __func__, ml.n_bytes/1.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f KiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0 , ml.n_bytes*8.0/ml.n_elements);
         LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f KB (%.2f BPW) \n", __func__, ml.n_bytes/1000.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f MB (%.2f BPW) \n", __func__, ml.n_bytes/1000.0/1000.0 , ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model size = %.2f GB (%.2f BPW) \n", __func__, ml.n_bytes/1000.0/1000.0/1000.0, ml.n_bytes*8.0/ml.n_elements);
     }
 
     // general kv
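The restored logs compute bits per weight as ml.n_bytes*8.0/ml.n_elements and report the total tensor size in both binary units (KiB/MiB/GiB, powers of 1024) and decimal units (KB/MB/GB, powers of 1000). A minimal standalone sketch of the same arithmetic, using hypothetical n_bytes/n_elements values rather than a loaded model:

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical example: a 7B-parameter model quantized to roughly 4.5 BPW.
    const uint64_t n_elements = 7000000000ULL; // total number of weights
    const uint64_t n_bytes    = 3937500000ULL; // total tensor bytes

    // Bits per weight: total bits divided by the number of parameters.
    const double bpw = n_bytes*8.0/n_elements;

    // Binary units divide by powers of 1024; decimal units by powers of 1000.
    printf("model size = %.2f MiB (%.2f BPW)\n", n_bytes/1024.0/1024.0, bpw);
    printf("model size = %.2f GiB (%.2f BPW)\n", n_bytes/1024.0/1024.0/1024.0, bpw);
    printf("model size = %.2f MB (%.2f BPW)\n", n_bytes/1000.0/1000.0, bpw);
    printf("model size = %.2f GB (%.2f BPW)\n", n_bytes/1000.0/1000.0/1000.0, bpw);
    return 0;
}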
