@@ -11779,7 +11779,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
         ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
     }
 
-    if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
+    if (!cparams.embeddings || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
         GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
         const int64_t n_tokens = batch.n_tokens;
 
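Note: the rewritten condition gates the inp_out_ids (output-skipping) path on the context rather than the model: it now runs for any non-embeddings decode and for un-pooled embeddings, while pooled embedding runs skip it. Below is a minimal caller-side sketch of the pooled case this enables, assuming the public llama.h API of this period (llama_context_default_params, the embeddings and pooling_type fields, llama_new_context_with_model); illustrative only, not part of the patch.

// Sketch (assumption): create a context that returns mean-pooled embeddings
// from an otherwise generative (causal) model.
#include "llama.h"

static llama_context * make_embedding_ctx(llama_model * model) {
    llama_context_params cparams = llama_context_default_params();
    cparams.embeddings   = true;                    // decode produces embeddings, not logits
    cparams.pooling_type = LLAMA_POOLING_TYPE_MEAN; // pooled output: the inp_out_ids path above is skipped
    return llama_new_context_with_model(model, cparams);
}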
@@ -11811,7 +11811,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
             // (!a || b) is a logical implication (a -> b)
             // !hparams.causal_attn -> !cparams.causal_attn
             (hparams.causal_attn || !cparams.causal_attn) &&
-            "causal attention with embedding models is not supported"
+            "causal attention is not supported by this model"
         );
 
         if (lctx.inp_KQ_mask) {
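Note: the assertion keeps its predicate and only rewords the message; the predicate encodes the implication "a non-causal model implies a non-causal context". A tiny self-contained sketch of that equivalence (illustrative values, not taken from the patch):

// Sketch (assumption): (!a || b) is the same predicate as "a implies b".
#include <cassert>

static bool implies(bool a, bool b) { return !a || b; }

int main() {
    const bool hparams_causal = false; // e.g. a BERT-style encoder model
    const bool cparams_causal = false; // the context must not request causal attention
    // !hparams.causal_attn -> !cparams.causal_attn
    assert(implies(!hparams_causal, !cparams_causal));
    // identical to the form used in the GGML_ASSERT above
    assert(hparams_causal || !cparams_causal);
    return 0;
}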
@@ -12036,7 +12036,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
 
     // TODO: use a per-batch flag for logits presence instead
     const bool has_logits = !cparams.embeddings;
-    const bool has_embd   =  cparams.embeddings;
+    const bool has_embd   =  cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
 
     const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
     const size_t embd_size   = has_embd   ?  n_embd*n_outputs_max : 0;
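Note: with this change the per-output embedding buffer is reserved only when embeddings are requested without pooling; pooled results are returned per sequence rather than per token, so they do not need n_embd*n_outputs_max floats here. A caller-side sketch of reading back both kinds of result, assuming the llama.h getters of this period (llama_get_embeddings_ith, llama_get_embeddings_seq); illustrative only, not part of the patch.

// Sketch (assumption): read back embeddings under the two modes.
#include "llama.h"

static const float * read_embedding(llama_context * ctx, enum llama_pooling_type ptype,
                                    int32_t i_output, llama_seq_id seq_id) {
    if (ptype == LLAMA_POOLING_TYPE_NONE) {
        // un-pooled: one row per kept output token, stored in the embd buffer sized above
        return llama_get_embeddings_ith(ctx, i_output);
    }
    // pooled: one vector per sequence, kept outside the per-output buffer
    return llama_get_embeddings_seq(ctx, seq_id);
}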