@@ -2706,7 +2706,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
     ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_flash_attn_split_k_reduce, "fa_split_k_reduce", fa_split_k_reduce_len, fa_split_k_reduce_data, "main", 2, 3 * sizeof(uint32_t), {1, 1, 1}, {}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_flash_attn_split_k_reduce, "fa_split_k_reduce", fa_split_k_reduce_len, fa_split_k_reduce_data, "main", 2, 4 * sizeof(uint32_t), {1, device->subgroup_size, 1}, {device->subgroup_size}, 1, true);
 
     ggml_vk_create_pipeline(device, device->pipeline_quantize_q8_1, "quantize_q8_1", quantize_q8_1_len, quantize_q8_1_data, "main", 2, 1 * sizeof(uint32_t), {32 * device->subgroup_size / 8, 1, 1}, { device->subgroup_size }, 1);
 
     for (uint32_t i = 0; i < p021_max_gqa_ratio; ++i) {
@@ -6252,13 +6252,13 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx
     const uint32_t shader_core_count = ctx->device->shader_core_count ? ctx->device->shader_core_count : 16;
 
     // Try to use split_k when KV is large enough to be worth the overhead
-    if (workgroups_x == 1 && shader_core_count > 0 && KV >= 512) {
+    if (workgroups_x == 1 && shader_core_count > 0) {
         // Try to run two workgroups per SM.
         split_k = shader_core_count * 2 / (workgroups_y * workgroups_z);
         if (split_k > 1) {
             // Try to evenly split KV into split_k chunks, but it needs to be a multiple
             // of "align", so recompute split_k based on that.
-            split_kv = ROUNDUP_POW2(KV / split_k, pipelines[1]->align);
+            split_kv = ROUNDUP_POW2(std::max(1u, KV / split_k), pipelines[1]->align);
             split_k = CEIL_DIV(KV, split_kv);
             workgroups_x = split_k;
         }
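
The heuristic in this hunk can be read as: pick split_k so that roughly two workgroups land on each shader core, round the per-split KV chunk up to a multiple of the pipeline's alignment, then recompute split_k from the rounded chunk size. Below is a minimal standalone sketch of that arithmetic; the CEIL_DIV/ROUNDUP_POW2 macros are re-declared so the example compiles on its own, and the KV, align, core-count, and workgroup values are illustrative assumptions, not values from this PR.

```cpp
// Standalone sketch of the split_k heuristic above; macros are local copies
// so the example is self-contained, and all input values are made up.
#include <algorithm>
#include <cstdint>
#include <cstdio>

#define CEIL_DIV(a, b)     (((a) + (b) - 1) / (b))
// Round x up to the next multiple of n, where n is a power of two.
#define ROUNDUP_POW2(x, n) (((x) + (n) - 1) & ~((n) - 1))

int main() {
    uint32_t KV = 4096, align = 64;               // assumed values
    uint32_t workgroups_y = 8, workgroups_z = 1;  // assumed values
    uint32_t shader_core_count = 32;              // assumed value

    uint32_t split_kv = KV;
    // Aim for two workgroups per shader core across the y/z grid.
    uint32_t split_k = shader_core_count * 2 / (workgroups_y * workgroups_z);
    if (split_k > 1) {
        // Chunk size must be a multiple of "align"; std::max guards against KV < split_k.
        split_kv = ROUNDUP_POW2(std::max(1u, KV / split_k), align);
        split_k  = CEIL_DIV(KV, split_kv);
    }
    printf("split_k=%u split_kv=%u\n", split_k, split_kv);  // prints: split_k=8 split_kv=512
    return 0;
}
```
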
@@ -6392,7 +6392,7 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx
                 vk_subbuffer{ctx->prealloc_split_k, 0, VK_WHOLE_SIZE},
                 vk_subbuffer{d_D, d_buf_offset, VK_WHOLE_SIZE},
             },
-            pc2, { (uint32_t)ne1, 1, (uint32_t)ne3 });
+            pc2, { (uint32_t)ne1, HSV, (uint32_t)ne3 });
     } else {
         ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
             {