4 files changed, +5 −5 lines changed

include/llama.h
@@ -558,7 +558,7 @@ extern "C" {
     LLAMA_API bool llama_model_is_recurrent (const struct llama_model * model);
 
     // Returns true if the model is hybrid-recurrent (like Jamba, Bamba, etc.)
-    LLAMA_API bool llama_model_is_hybrid (const struct llama_model * model);
+    LLAMA_API bool llama_model_is_hybrid_recurrent (const struct llama_model * model);
 
     // Returns 0 on success
     LLAMA_API uint32_t llama_model_quantize (
src/llama-arch.cpp
@@ -1762,7 +1762,7 @@ bool llm_arch_is_recurrent(const llm_arch & arch) {
     }
 }
 
-bool llm_arch_is_hybrid (const llm_arch & arch) {
+bool llm_arch_is_hybrid_recurrent (const llm_arch & arch) {
     // TODO: There are currently no hybrid models! Once there are, this will be
     // the place to identify them
     switch (arch) {
src/llama-arch.h
@@ -440,4 +440,4 @@ llm_arch llm_arch_from_string(const std::string & name);
 const llm_tensor_info & llm_tensor_info_for (llm_tensor tensor);
 
 bool llm_arch_is_recurrent (const llm_arch& arch);
-bool llm_arch_is_hybrid (const llm_arch& arch);
+bool llm_arch_is_hybrid_recurrent (const llm_arch& arch);
src/llama-model.cpp
@@ -13811,8 +13811,8 @@ bool llama_model_is_recurrent(const llama_model * model) {
     return llm_arch_is_recurrent(model->arch);
 }
 
-bool llama_model_is_hybrid (const llama_model * model) {
-    return llm_arch_is_hybrid (model->arch);
+bool llama_model_is_hybrid_recurrent (const llama_model * model) {
+    return llm_arch_is_hybrid_recurrent (model->arch);
 }
 
 const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
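For context, a minimal caller-side sketch of the renamed public API. The surrounding setup is not part of this diff: the load/teardown calls (llama_model_default_params, llama_model_load_from_file, llama_model_free) follow current llama.cpp naming, which has shifted across versions, and "model.gguf" is a placeholder path.

#include <stdio.h>
#include "llama.h"

int main(void) {
    // Load any GGUF model with default parameters (placeholder path).
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // The renamed predicate: true only for hybrid-recurrent architectures
    // (attention layers mixed with recurrent layers, e.g. Jamba, Bamba).
    if (llama_model_is_hybrid_recurrent(model)) {
        printf("model is hybrid-recurrent\n");
    } else if (llama_model_is_recurrent(model)) {
        printf("model is purely recurrent\n");
    }

    llama_model_free(model);
    return 0;
}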