Skip to content

Commit e5f0ade

Browse files
committed
fix: Use per-layer sizing everywhere in kv caches
Branch: GraniteFour. Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
1 parent 822b40c commit e5f0ade

File tree

2 files changed: +16 −16 lines changed

src/llama-kv-cache-recurrent.cpp

Lines changed: 8 additions & 8 deletions
@@ -69,8 +69,8 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
             continue;
         }
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(i);
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(i);
 
         const char * dev_name = "CPU";
 
@@ -756,7 +756,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
     // Iterate and write all the keys first, each row is a cell
     // Get whole range at a time
     for (uint32_t il = 0; il < n_layer; ++il) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Write key type
         const int32_t k_type_i = (int32_t)k_l[il]->type;
@@ -776,7 +776,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
 
     if (!v_trans) {
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Write value type
             const int32_t v_type_i = (int32_t)v_l[il]->type;
@@ -797,7 +797,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
         // When v is transposed, we also need the element size and get the element ranges from each row
         const uint32_t kv_size = size;
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Write value type
             const int32_t v_type_i = (int32_t)v_l[il]->type;
@@ -944,7 +944,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
 
     // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
     for (uint32_t il = 0; il < n_layer; ++il) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Read type of key
         int32_t k_type_i_ref;
@@ -972,7 +972,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
 
     if (!v_trans) {
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
             // Read type of value
             int32_t v_type_i_ref;
@@ -1000,7 +1000,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
     } else {
        // For each layer, read the values for each cell (transposed)
        for (uint32_t il = 0; il < n_layer; ++il) {
-           const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+           const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
            // Read type of value
            int32_t v_type_i_ref;

src/llama-kv-cache-unified.cpp

Lines changed: 8 additions & 8 deletions
@@ -68,8 +68,8 @@ llama_kv_cache_unified::llama_kv_cache_unified(
             continue;
         }
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
         const char * dev_name = "CPU";
 
@@ -1410,7 +1410,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Write key type
         const int32_t k_type_i = (int32_t)layer.k->type;
@@ -1432,7 +1432,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
         // Write value type
         const int32_t v_type_i = (int32_t)layer.v->type;
@@ -1456,7 +1456,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
         // Write value type
         const int32_t v_type_i = (int32_t)layer.v->type;
@@ -1599,7 +1599,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
 
         // Read type of key
         int32_t k_type_i_ref;
@@ -1629,7 +1629,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
         // Read type of value
         int32_t v_type_i_ref;
@@ -1659,7 +1659,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;
 
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);
 
         // Read type of value
         int32_t v_type_i_ref;

0 commit comments

Comments (0)