
Commit 6c662aa

Readd variable attn_k, attn_q, attn_o after merge
1 parent: 808dcdf

File tree: 1 file changed, +3 -0 lines

src/llama.cpp

Lines changed: 3 additions & 0 deletions
@@ -17171,6 +17171,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             n_attn_layer *= 3;
         }
         GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+        GGML_ASSERT((qs.n_attention_wk == n_attn_layer) && "n_attention_wk is unexpected");
+        GGML_ASSERT((qs.n_attention_wq == n_attn_layer) && "n_attention_wq is unexpected");
+        GGML_ASSERT((qs.n_attention_wo == n_attn_layer) && "n_attention_wo is unexpected");
     }
 
     size_t total_size_org = 0;
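
For context, these asserts are sanity checks in the quantization path: each qs.n_attention_* field counts how many attention projection tensors of one kind were encountered while walking the model, and each count must equal n_attn_layer. Below is a minimal, self-contained sketch of how such counters could be accumulated from tensor names. This is not the actual llama.cpp implementation; the names quantize_state_sketch and count_attn_tensor_sketch, and the exact name-suffix matching, are assumptions for illustration.

    #include <string>

    // Hypothetical stand-in for the quantization bookkeeping state (qs).
    struct quantize_state_sketch {
        int n_attention_wv = 0; // tensors matching attn_v.weight
        int n_attention_wk = 0; // tensors matching attn_k.weight
        int n_attention_wq = 0; // tensors matching attn_q.weight
        int n_attention_wo = 0; // tensors matching attn_output.weight
    };

    // Called once per tensor during the pass: bump the counter whose
    // kind appears in the tensor's name. (Suffix matching is assumed.)
    static void count_attn_tensor_sketch(quantize_state_sketch & qs,
                                         const std::string & name) {
        if      (name.find("attn_v.weight")      != std::string::npos) { ++qs.n_attention_wv; }
        else if (name.find("attn_k.weight")      != std::string::npos) { ++qs.n_attention_wk; }
        else if (name.find("attn_q.weight")      != std::string::npos) { ++qs.n_attention_wq; }
        else if (name.find("attn_output.weight") != std::string::npos) { ++qs.n_attention_wo; }
    }

With one counter per projection, each restored GGML_ASSERT verifies that the pass saw exactly n_attn_layer K, Q, and output projection tensors, mirroring the pre-existing attn_v check that survived the merge.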
