1 parent 2ca3416 commit 3c22e1d
src/llama-model.cpp
@@ -13263,7 +13263,7 @@ struct llm_build_hybrid_mamba : public llm_graph_context {
}

// Extract the recurrent cache from the hybrid parent
- const auto * kv_recurrent = static_cast<const llama_kv_cache_hybrid *>(memory)->get_child_cache<llama_kv_cache_recurrent>();
+ const auto * kv_recurrent = static_cast<const llama_kv_cache_hybrid_recurrent *>(memory)->get_kv_recurrent();
GGML_ASSERT(kv_recurrent);

for (int il = 0; il < n_layer; ++il) {
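
The change swaps the generic hybrid cache lookup (a cast to llama_kv_cache_hybrid followed by a templated get_child_cache<llama_kv_cache_recurrent>()) for a cast to llama_kv_cache_hybrid_recurrent and its dedicated get_kv_recurrent() accessor. The sketch below is a minimal illustration of that accessor pattern, not the actual llama.cpp implementation; the member name, ownership model, and constructor shown are assumptions for the example only.

```cpp
#include <memory>

struct llama_kv_cache_recurrent;  // defined elsewhere in llama.cpp

// Hypothetical sketch: a hybrid cache type that owns a recurrent child
// cache and exposes it through a typed accessor, so call sites no longer
// need a templated child-cache lookup.
struct llama_kv_cache_hybrid_recurrent {
    // hypothetical owned child cache
    std::unique_ptr<llama_kv_cache_recurrent> recurrent;

    // typed accessor matching the new call site in the diff:
    //   static_cast<const llama_kv_cache_hybrid_recurrent *>(memory)->get_kv_recurrent()
    const llama_kv_cache_recurrent * get_kv_recurrent() const {
        return recurrent.get();
    }
};
```

A dedicated accessor like this keeps the call site free of template arguments and makes the relationship between the hybrid cache and its recurrent child explicit in the type, which is presumably why the graph-building code was updated to use it.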