@@ -413,13 +413,6 @@ void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
     }
 }
 
-llm_graph_input_attn_kv_hybrid_recurrent::llm_graph_input_attn_kv_hybrid_recurrent(
-    const llama_hparams & hparams,
-    const llama_cparams & cparams,
-    const llama_kv_cache_hybrid_recurrent_state * kv_state) :
-    llm_graph_input_attn_kv_unified(hparams, cparams, kv_state->get_state_attn()) {
-}
-
 //
 // llm_graph_context
 //
@@ -1280,7 +1273,9 @@ ggml_tensor * llm_graph_context::build_attn(
     ggml_build_forward_expand(gf, k_cur);
     ggml_build_forward_expand(gf, v_cur);
 
-    const auto * kv_state = static_cast<const llama_kv_cache_unified_state *>(mstate);
+    // NOTE: For hybrid caches, this may be a child of mstate, so we use the one
+    //  encapsulated in inp
+    const auto * kv_state = inp->kv_state;
 
     // store to KV cache
     {
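
For context on the NOTE above: build_attn() no longer casts mstate, because with a hybrid cache mstate is the hybrid parent state and the attention state is one of its children; the graph-input object already holds the right child, captured at construction. Below is a minimal standalone sketch of that pattern, using stand-in types (attn_state, hybrid_state, attn_input are illustrative placeholders, not the real llama.cpp classes):

```cpp
// Sketch: the graph-input object captures the attention state it was built
// from, so downstream code reads inp->kv_state instead of casting mstate,
// which for a hybrid cache would be the parent, not the attention child.
#include <cassert>

struct attn_state   { int n_kv = 32; };       // stand-in for llama_kv_cache_unified_state
struct hybrid_state {                         // stand-in for llama_kv_cache_hybrid_recurrent_state
    attn_state attn;
    const attn_state * get_state_attn() const { return &attn; }
};

struct attn_input {                           // stand-in for llm_graph_input_attn_kv_unified
    explicit attn_input(const attn_state * st) : kv_state(st) {}
    const attn_state * kv_state;
};

int main() {
    hybrid_state mstate;                             // top-level memory state is the hybrid parent
    attn_input inp(mstate.get_state_attn());         // the input captures the attention child state

    // build_attn-style code reads the state from the input, never from mstate:
    assert(inp.kv_state->n_kv == 32);
    return 0;
}
```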
@@ -1312,10 +1307,10 @@ ggml_tensor * llm_graph_context::build_attn(
     return cur;
 }
 
-llm_graph_input_attn_kv_hybrid_recurrent * llm_graph_context::build_attn_inp_kv_hybrid_recurrent() const {
+llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_hybrid_recurrent() const {
     const auto * kv_state = static_cast<const llama_kv_cache_hybrid_recurrent_state *>(mstate);
 
-    auto inp = std::make_unique<llm_graph_input_attn_kv_hybrid_recurrent>(hparams, cparams, kv_state);
+    auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, kv_state->get_state_attn());
 
     {
         GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Hybrid recurrent is not supported with SWA attention layers");
@@ -1329,25 +1324,7 @@ llm_graph_input_attn_kv_hybrid_recurrent * llm_graph_context::build_attn_inp_kv_
         inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
     }
 
-    return (llm_graph_input_attn_kv_hybrid_recurrent *) res->add_input(std::move(inp));
-}
-
-ggml_tensor * llm_graph_context::build_attn(
-        llm_graph_input_attn_kv_hybrid_recurrent * inp,
-        ggml_cgraph * gf,
-        ggml_tensor * wo,
-        ggml_tensor * wo_b,
-        ggml_tensor * q_cur,
-        ggml_tensor * k_cur,
-        ggml_tensor * v_cur,
-        ggml_tensor * kq_b,
-        ggml_tensor * v_mla,
-        float kq_scale,
-        int il) const {
-    return build_attn(
-        static_cast<llm_graph_input_attn_kv_unified *>(inp),
-        gf, wo, wo_b, q_cur, k_cur, v_cur, kq_b, v_mla, kq_scale, il
-    );
+    return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp));
 }
 
 llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const {
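
Since build_attn_inp_kv_hybrid_recurrent() now returns llm_graph_input_attn_kv_unified directly, the hybrid-specific build_attn() overload removed above was only a cast-and-forward shim; callers now resolve to the existing unified overload. A tiny standalone sketch of that overload-resolution point, again with stand-in types rather than the real llama.cpp ones:

```cpp
// Sketch: once the factory returns the unified input type, the existing
// build_attn(<unified input> *, ...) overload applies directly and no
// hybrid-specific forwarding overload is needed.
#include <cstdio>

struct input_unified { };                     // stand-in for llm_graph_input_attn_kv_unified

// stand-in for the remaining build_attn overload
void build_attn(input_unified * /*inp*/) { std::puts("unified build_attn"); }

// stand-in for build_attn_inp_kv_hybrid_recurrent(): now returns the unified type
input_unified * build_attn_inp_kv_hybrid_recurrent(input_unified & storage) { return &storage; }

int main() {
    input_unified storage;
    input_unified * inp = build_attn_inp_kv_hybrid_recurrent(storage);
    build_attn(inp);                          // no hybrid-specific wrapper or cast required
    return 0;
}
```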
@@ -1490,13 +1467,17 @@ ggml_tensor * llm_graph_context::build_attn(
 }
 
 ggml_tensor * llm_graph_context::build_copy_mask_state(
-         ggml_cgraph * gf,
-         ggml_tensor * s,
-         ggml_tensor * state_copy,
-         ggml_tensor * state_mask,
-             int32_t   n_state,
-             int32_t   n_seqs) const {
-    const auto * kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+             ggml_cgraph * gf,
+             ggml_tensor * s,
+             ggml_tensor * state_copy,
+             ggml_tensor * state_mask,
+                 int32_t   n_state,
+                 int32_t   n_seqs,
+    const llama_kv_cache_recurrent_state * kv_state) const {
+
+    if (kv_state == nullptr) {
+        kv_state = static_cast<const llama_kv_cache_recurrent_state *>(mstate);
+    }
 
     const auto n_kv    = kv_state->get_n_kv();
     const auto kv_head = kv_state->get_head();
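
The new trailing parameter lets a hybrid caller hand build_copy_mask_state() the recurrent child state explicitly, while passing null keeps the old behavior of deriving the state from mstate. A minimal standalone sketch of this "explicit state overrides the context default" pattern, with stand-in types (recurrent_state, graph_context are illustrative placeholders, not llama.cpp's):

```cpp
// Sketch: an explicitly passed state takes precedence; a null argument falls
// back to the state owned by the context, matching the previous behavior.
#include <cassert>

struct recurrent_state { int head = 0; };     // stand-in for llama_kv_cache_recurrent_state

struct graph_context {                        // stand-in for llm_graph_context
    const recurrent_state * mstate = nullptr;

    int copy_mask_state(const recurrent_state * kv_state = nullptr) const {
        if (kv_state == nullptr) {
            kv_state = mstate;                // previous behavior: derive the state from mstate
        }
        return kv_state->head;
    }
};

int main() {
    recurrent_state top;   top.head   = 7;    // state owned by the context (non-hybrid case)
    recurrent_state child; child.head = 42;   // recurrent child of a hybrid cache

    graph_context ctx;
    ctx.mstate = &top;

    assert(ctx.copy_mask_state()       == 7);    // old callers: behavior unchanged
    assert(ctx.copy_mask_state(&child) == 42);   // hybrid callers: pass the child state explicitly
    return 0;
}
```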