@@ -4890,6 +4890,7 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_PHI3:
             {
+                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                 switch (hparams.n_layer) {
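
For context on what the new `LLM_KV_ATTENTION_SLIDING_WINDOW` key feeds into: `hparams.n_swa` is the sliding-window size in tokens. Below is a minimal sketch of the visibility rule it implies; the helper name is made up for illustration, and the exact `>=` boundary comparison is an assumption based on the SWA cutoff used for `data_swa` later in this file.

```cpp
#include <cstdint>

// Sketch only: a KV cell is hidden from a query either by causality (it lies in the
// future) or by the sliding window (it lies more than n_swa positions in the past).
static bool swa_cell_masked(int32_t q_pos, int32_t kv_pos, uint32_t n_swa) {
    if (kv_pos > q_pos) {
        return true;                            // future token: masked by causal attention
    }
    return (q_pos - kv_pos) >= (int32_t) n_swa; // too far back: outside the sliding window
}
```
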
@@ -10749,7 +10750,7 @@ struct llm_build_context {
         struct ggml_tensor * inp_pos = build_inp_pos();

         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+        struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();

         for (int il = 0; il < n_layer; ++il) {
             auto residual = inpL;
@@ -10807,7 +10808,7 @@ struct llm_build_context {

                 cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                         model.layers[il].wo, model.layers[il].bo,
-                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                        Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
             }

             if (il == n_layer - 1) {
@@ -14014,18 +14015,23 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
         "causal attention is not supported by this model"
     );

-    if (lctx.inp_KQ_mask) {
+    if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) {
         // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
         if (cparams.causal_attn && !lctx.is_encoding) {
             const int64_t n_kv = kv_self.n;
             const int64_t n_tokens = batch.n_tokens;

-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));

-            float * data = (float *) lctx.inp_KQ_mask->data;
+            float * data = nullptr;
             float * data_swa = nullptr;

+            if (lctx.inp_KQ_mask) {
+                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
+                data = (float *) lctx.inp_KQ_mask->data;
+            }
+
             if (lctx.inp_KQ_mask_swa) {
+                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer));
                 data_swa = (float *) lctx.inp_KQ_mask_swa->data;
             }

@@ -14048,7 +14054,10 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                                 f = 0.0f;
                             }
                         }
-                        data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+
+                        if (data) {
+                            data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+                        }

                         // may need to cut off old tokens for sliding window
                         if (data_swa) {
@@ -14060,9 +14069,19 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                     }
                 }

-                for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                    for (int j = 0; j < n_kv; ++j) {
-                        data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+                if (data) {
+                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+                        for (int j = 0; j < n_kv; ++j) {
+                            data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+                        }
+                    }
+                }
+
+                if (data_swa) {
+                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+                        for (int j = 0; j < n_kv; ++j) {
+                            data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+                        }
                     }
                 }
             }
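
Taken together, the `llama_set_inputs` hunks reduce to one pattern: both mask buffers are now optional, every write is guarded, and the padded rows of whichever buffers exist are filled with `-INFINITY`. The following is a condensed, standalone sketch of that shape, not the actual llama.cpp routine; the flat position arrays, the dropped broadcast head dimension, and the `>=` window comparison are simplifications/assumptions made for illustration.

```cpp
// Condensed sketch of the guarded mask-fill pattern from this patch:
// fill whichever of the two mask buffers is present, then pad the
// trailing rows of each with -INFINITY.
#include <cmath>
#include <cstdint>

static void fill_masks(float * data, float * data_swa,
                       int64_t n_kv, int64_t n_tokens, int64_t n_tokens_padded,
                       const int32_t * q_pos, const int32_t * kv_pos, uint32_t n_swa) {
    for (int64_t j = 0; j < n_tokens; ++j) {
        for (int64_t i = 0; i < n_kv; ++i) {
            // causal mask: KV cells after the query position are hidden
            // (the broadcast head dimension h from the original indexing is dropped here)
            float f = (kv_pos[i] > q_pos[j]) ? -INFINITY : 0.0f;

            if (data) {
                data[j*n_kv + i] = f;
            }

            if (data_swa) {
                // additionally hide cells that fall outside the sliding window
                if (q_pos[j] - kv_pos[i] >= (int32_t) n_swa) {
                    f = -INFINITY;
                }
                data_swa[j*n_kv + i] = f;
            }
        }
    }

    // rows beyond n_tokens exist only for padding; mask them out entirely
    for (int64_t i = n_tokens; i < n_tokens_padded; ++i) {
        for (int64_t j = 0; j < n_kv; ++j) {
            if (data)     { data    [i*n_kv + j] = -INFINITY; }
            if (data_swa) { data_swa[i*n_kv + j] = -INFINITY; }
        }
    }
}
```
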