Skip to content

Commit 4521708

Browse files
committed
refactor: rename *_is_hybrid -> *_is_hybrid_recurrent
The implementation of the hybrid cache intentionally does not specify the types of the child caches, so there was a naming mismatch with these predicate functions, which used "hybrid" to imply "hybrid recurrent."

Branch: HybridCache

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
1 parent 6a397e5 commit 4521708

File tree

4 files changed

+5
-5
lines changed

4 files changed

+5
-5
lines changed

include/llama.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -570,7 +570,7 @@ extern "C" {
570570
LLAMA_API bool llama_model_is_recurrent(const struct llama_model * model);
571571

572572
// Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
573-
LLAMA_API bool llama_model_is_hybrid(const struct llama_model * model);
573+
LLAMA_API bool llama_model_is_hybrid_recurrent(const struct llama_model * model);
574574

575575
// Returns 0 on success
576576
LLAMA_API uint32_t llama_model_quantize(

src/llama-arch.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1767,7 +1767,7 @@ bool llm_arch_is_recurrent(const llm_arch & arch) {
17671767
}
17681768
}
17691769

1770-
bool llm_arch_is_hybrid(const llm_arch & arch) {
1770+
bool llm_arch_is_hybrid_recurrent(const llm_arch & arch) {
17711771
// TODO: There are currently no hybrid models! Once there are, this will be
17721772
// the place to identify them
17731773
switch (arch) {

src/llama-arch.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -439,4 +439,4 @@ llm_arch llm_arch_from_string(const std::string & name);
439439
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
440440

441441
bool llm_arch_is_recurrent(const llm_arch& arch);
442-
bool llm_arch_is_hybrid(const llm_arch& arch);
442+
bool llm_arch_is_hybrid_recurrent(const llm_arch& arch);

src/llama-model.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -13828,8 +13828,8 @@ bool llama_model_is_recurrent(const llama_model * model) {
1382813828
return llm_arch_is_recurrent(model->arch);
1382913829
}
1383013830

13831-
bool llama_model_is_hybrid(const llama_model * model) {
13832-
return llm_arch_is_hybrid(model->arch);
13831+
bool llama_model_is_hybrid_recurrent(const llama_model * model) {
13832+
return llm_arch_is_hybrid_recurrent(model->arch);
1383313833
}
1383413834

1383513835
const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {

0 commit comments

Comments (0)