Commit eacdeb5

model : fix build after merge conflict (#14754)
1 parent e0cb5c5

File tree

1 file changed (+4, -4 lines)


src/llama-model.cpp

Lines changed: 4 additions & 4 deletions
@@ -13530,7 +13530,7 @@ struct llm_build_exaone : public llm_graph_context {

 template <bool iswa>
 struct llm_build_exaone4 : public llm_graph_context {
-    llm_build_exaone4(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
+    llm_build_exaone4(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
         const int64_t n_embd_head = hparams.n_embd_head_k;

         GGML_ASSERT(n_embd_head == hparams.n_embd_head_v);
@@ -13603,7 +13603,7 @@ struct llm_build_exaone4 : public llm_graph_context {
             cb(Kcur, "Kcur", il);
             cb(Vcur, "Vcur", il);

-            cur = build_attn(inp_attn, gf,
+            cur = build_attn(inp_attn,
                     model.layers[il].wo, NULL,
                     Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
             cb(cur, "attn_out", il);
@@ -17352,9 +17352,9 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
         case LLM_ARCH_EXAONE4:
             {
                 if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
-                    llm = std::make_unique<llm_build_exaone4<true>>(*this, params, gf);
+                    llm = std::make_unique<llm_build_exaone4<true>>(*this, params);
                 } else {
-                    llm = std::make_unique<llm_build_exaone4<false>>(*this, params, gf);
+                    llm = std::make_unique<llm_build_exaone4<false>>(*this, params);
                 }
             } break;
         case LLM_ARCH_RWKV6:
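
The change itself is a signature cleanup: the ggml_cgraph * gf argument is dropped from the llm_build_exaone4 constructor, from its build_attn call, and from the two std::make_unique call sites in llama_model::build_graph, so the EXAONE4 builder again matches the graph-builder API it inherits after the merge. The sketch below illustrates the same pattern with hypothetical stand-in types (graph_params, graph_context, build_exaone4 are placeholders, not the real llama.cpp classes); it shows why every call site has to shed the extra argument once the base constructor stops accepting it.

    #include <memory>

    // Hypothetical stand-ins for llm_graph_params / llm_graph_context / llm_build_exaone4.
    struct graph_params {};

    struct graph_context {
        // After the refactor the context no longer receives an explicit graph pointer.
        explicit graph_context(const graph_params & /*params*/) {}
        virtual ~graph_context() = default;
    };

    template <bool iswa>
    struct build_exaone4 : graph_context {
        // New-style constructor: the trailing graph-pointer parameter is gone.
        explicit build_exaone4(const graph_params & params) : graph_context(params) {}
    };

    // Mirror of the build_graph call sites: they must drop the old third argument,
    // otherwise the constructor call no longer matches and the build fails.
    std::unique_ptr<graph_context> build_graph(const graph_params & params, bool swa) {
        if (swa) {
            return std::make_unique<build_exaone4<true>>(params);
        }
        return std::make_unique<build_exaone4<false>>(params);
    }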
