@@ -75,8 +75,8 @@ llama_kv_cache_unified::llama_kv_cache_unified(
             continue;
         }

-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

         const char * dev_name = "CPU";
@@ -1369,7 +1369,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;

-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);

         // Write key type
         const int32_t k_type_i = (int32_t)layer.k->type;
@@ -1391,7 +1391,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;

-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

         // Write value type
         const int32_t v_type_i = (int32_t)layer.v->type;
@@ -1415,7 +1415,7 @@ void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const std::
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;

-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

         // Write value type
         const int32_t v_type_i = (int32_t)layer.v->type;
@@ -1552,7 +1552,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;

-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);

         // Read type of key
         int32_t k_type_i_ref;
@@ -1582,7 +1582,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;

-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

         // Read type of value
         int32_t v_type_i_ref;
@@ -1612,7 +1612,7 @@ bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell
     for (const auto & layer : layers) {
         const uint32_t il = layer.il;

-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

         // Read type of value
         int32_t v_type_i_ref;
@@ -1921,8 +1921,8 @@ llama_kv_cache_recurrent::llama_kv_cache_recurrent(
             continue;
         }

-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(i);
+        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(i);

         const char * dev_name = "CPU";
@@ -2649,7 +2649,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
     // Iterate and write all the keys first, each row is a cell
     // Get whole range at a time
     for (uint32_t il = 0; il < n_layer; ++il) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);

         // Write key type
         const int32_t k_type_i = (int32_t)k_l[il]->type;
@@ -2669,7 +2669,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std

     if (!v_trans) {
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

             // Write value type
             const int32_t v_type_i = (int32_t)v_l[il]->type;
@@ -2690,7 +2690,7 @@ void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std
         // When v is transposed, we also need the element size and get the element ranges from each row
         const uint32_t kv_size = size;
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

             // Write value type
             const int32_t v_type_i = (int32_t)v_l[il]->type;
@@ -2837,7 +2837,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce

     // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
     for (uint32_t il = 0; il < n_layer; ++il) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(il);

         // Read type of key
         int32_t k_type_i_ref;
@@ -2865,7 +2865,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce

     if (!v_trans) {
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

             // Read type of value
             int32_t v_type_i_ref;
@@ -2893,7 +2893,7 @@ bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t ce
     } else {
         // For each layer, read the values for each cell (transposed)
         for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(il);

             // Read type of value
             int32_t v_type_i_ref;
0 commit comments