Skip to content

Commit cddc899

Browse files
committed
llama : various integer type cast and format string fixes
Some platforms use "%lu" and others "%llu" for uint64_t. Not sure how to handle that, so casting to size_t when displaying errors.
1 parent 9e22064 commit cddc899

File tree

1 file changed: +7 additions, -7 deletions

src/llama.cpp

Lines changed: 7 additions & 7 deletions
Columns: original file line number | diff line number | diff line change
@@ -17357,7 +17357,7 @@ struct llama_data_context {
     }

     void write_logits(const struct llama_context * ctx) {
-        const uint64_t logits_size = std::min(ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab);
+        const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab);

         write(&logits_size, sizeof(logits_size));

@@ -17367,7 +17367,7 @@ struct llama_data_context {
     }

     void write_embeddings(const struct llama_context * ctx) {
-        const uint64_t embeddings_size = std::min(ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd);
+        const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd);

         write(&embeddings_size, sizeof(embeddings_size));

@@ -17461,7 +17461,7 @@ struct llama_data_context {
             write(&v_type_i, sizeof(v_type_i));

             // Write element size
-            const uint64_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
+            const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
             write(&v_size_el, sizeof(v_size_el));

             // Write GQA embedding size
// Write GQA embedding size
@@ -17710,7 +17710,7 @@ struct llama_data_read_context {
         const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
         if (k_size_row != k_size_row_ref) {
             // llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
-            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, k_size_row_ref, il);
+            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
             return false;
         }

@@ -17739,7 +17739,7 @@ struct llama_data_read_context {
         read_to(&v_size_row_ref, sizeof(v_size_row_ref));
         const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
         if (v_size_row != v_size_row_ref) {
-            LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, v_size_row_ref, il);
+            LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
             return false;
         }

@@ -17763,11 +17763,11 @@ struct llama_data_read_context {
         }

         // Read element size of value
-        uint64_t v_size_el_ref;
+        uint32_t v_size_el_ref;
         read_to(&v_size_el_ref, sizeof(v_size_el_ref));
         const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
         if (v_size_el != v_size_el_ref) {
-            LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %llu, layer %d)\n", __func__, v_size_el, v_size_el_ref, il);
+            LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
             return false;
         }

Comments (0)