
Commit 94d7418

Tiled approach for F32
1 parent e69f0c7 commit 94d7418

File tree (2 files changed: +53 additions, -138 deletions)

    ggml/src/ggml-cpu/ops.cpp
    tests/CMakeLists.txt

ggml/src/ggml-cpu/ops.cpp

Lines changed: 52 additions & 138 deletions
@@ -6163,18 +6163,21 @@ static void ggml_call_mul_mat(
 
 // ggml_compute_forward_conv_2d
 
-static void ggml_compute_forward_conv_2d_f32(const ggml_compute_params * params,
-                                             ggml_tensor * dst) {
-
-    const ggml_tensor * src    = dst->src[1]; // [W H C_in N]
-    const ggml_tensor * kernel = dst->src[0]; // [W H C_in C_out]
+static void ggml_compute_forward_conv_2d_f32(
+        const ggml_compute_params * params,
+        const ggml_tensor *         kernel, // [KW, KH, IC, OC] - fp32
+        const ggml_tensor *         src,    // [W, H, C, N]
+        ggml_tensor *               dst) {  // [OW, OH, OC, N]
 
     GGML_ASSERT(ggml_is_contiguous(kernel));
+    GGML_ASSERT(kernel->type == GGML_TYPE_F32);
 
-    const int32_t stride_x = dst->op_params[0];
-    const int32_t stride_y = dst->op_params[1];
-    const int32_t pad_x    = dst->op_params[2];
-    const int32_t pad_y    = dst->op_params[3];
+    const int32_t stride_x   = dst->op_params[0];
+    const int32_t stride_y   = dst->op_params[1];
+    const int32_t pad_x      = dst->op_params[2];
+    const int32_t pad_y      = dst->op_params[3];
+    const int32_t dilation_x = dst->op_params[4];
+    const int32_t dilation_y = dst->op_params[5];
 
     const int64_t c_in  = src->ne[2];
     const int64_t c_out = kernel->ne[3];
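Note: the rewritten signature mirrors the F16 helper that this commit deletes further down (kernel and src are passed in explicitly rather than fetched from dst->src[] inside the function), and the two extra op_params slots add dilation support to the F32 path. For reference, the output extent of a dilated convolution follows the standard relation sketched below; this helper is illustrative, not part of the commit:

    // standard dilated-conv output size (illustrative helper, not in the diff);
    // the dst tensor handed to the function above is assumed to already satisfy it
    static int64_t conv2d_out_size(int64_t in, int64_t k, int32_t stride, int32_t pad, int32_t dilation) {
        return (in + 2*pad - dilation*(k - 1) - 1) / stride + 1;
    }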
@@ -6187,193 +6190,104 @@ static void ggml_compute_forward_conv_2d_f32(const ggml_compute_params * params,
     const int64_t dst_w = dst->ne[0];
     const int64_t dst_h = dst->ne[1];
 
-
-    float * src_data = (float *) src->data;
-    float * knl_data = (float *) kernel->data;
-    float * dst_data = ( float *) dst->data;
-
+    float * src_data = (float*) src->data;
+    float * knl_data = (float*) kernel->data;
+    float * dst_data = (float*) dst->data;
 
     const int64_t knl_n       = knl_w * knl_h * c_in;
     const int64_t patch_total = dst->ne[3] * dst_w * dst_h;
-
-
-
-    const int64_t space_per_patch = knl_n * sizeof(float) + patch_total * c_out * sizeof(float);
 
-    const int64_t batch_size = params->wsize / space_per_patch;
+    const int64_t space_per_patch   = knl_n * sizeof(float) + c_out * sizeof(float);
+    const int64_t batch_size        = params->wsize / space_per_patch;
     const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size;
-    const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch;
-
+    const int64_t batch_n           = (patch_total + patches_per_batch - 1) / patches_per_batch;
 
     GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1);
 
-    float * tmp = (float *) params->wdata; // per-thread scratch
+    float * tmp = (float *) params->wdata;
 
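This tile sizing is the core of the change. Before, space_per_patch budgeted the entire GEMM output (patch_total * c_out floats) against every single patch, so batch_size collapsed for large outputs; now a patch pays only for its own im2col row (knl_n floats) plus its own output row (c_out floats), and the patch list is walked in tiles of patches_per_batch. A worked example with illustrative sizes: a 3x3 kernel over c_in = 64 gives knl_n = 3*3*64 = 576; with c_out = 128, space_per_patch = (576 + 128) * 4 = 2816 bytes, so a 16 MiB workspace gives batch_size = 16777216 / 2816 = 5957, which rounds down to a multiple of eight as patches_per_batch = 5952.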
     for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) {
 
         const int64_t patch_start_batch = batch_i * patches_per_batch;
         const int64_t patch_end_batch   = std::min(patch_start_batch + patches_per_batch,
                                                    patch_total);
-        const int64_t patch_n = patch_end_batch - patch_start_batch;
+        const int64_t patch_n           = patch_end_batch - patch_start_batch;
 
-        const int64_t patch_per_thread =
-            (patch_n + params->nth - 1) / params->nth;
-        const int64_t patch_start = patch_start_batch +
-            params->ith * patch_per_thread;
-        const int64_t patch_end = std::min(patch_start + patch_per_thread,
-                                           patch_end_batch);
+        const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth;
+        const int64_t patch_start      = patch_start_batch + params->ith * patch_per_thread;
+        const int64_t patch_end        = std::min(patch_start + patch_per_thread,patch_end_batch);
 
         //im2col for a patch
         for (int64_t p = patch_start; p < patch_end; ++p) {
-            const int64_t b  = p / (dst_w * dst_h);
-            const int64_t dy = (p / dst_w) % dst_h;
-            const int64_t dx = p % dst_w;
+            const int64_t batch_n = p / (dst_w * dst_h);
+            const int64_t src_x   = (p / dst_w) % dst_h;
+            const int64_t src_y   = p % dst_w;
 
-            const float * src_base = (const float *)((char *)src_data + b * src->nb[3]);
-            float * out_row = tmp + (p % patches_per_batch) * knl_n;
+            float * src_base = (float *)((char *)src_data + batch_n * src->nb[3]);
+            float * dst_row  = tmp + (p % patches_per_batch) * knl_n;
 
-            // Extract patch in IC,KH,KW order (same as im2col)
             for (int64_t ic = 0; ic < c_in; ++ic) {
                 for (int64_t ky = 0; ky < knl_h; ++ky) {
                     for (int64_t kx = 0; kx < knl_w; ++kx) {
-                        const int64_t sy = dy * stride_y + ky - pad_y;
-                        const int64_t sx = dx * stride_x + kx - pad_x;
-
+                        const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y;
+                        const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x;
+
                         int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx;
-
+
                         if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) {
-                            out_row[dst_idx] = 0.0f;
+                            dst_row[dst_idx] = 0.0f;
                         } else {
-                            float * src_ptr = (float *)((char *)src_base +
-                                sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]);
-                            out_row[dst_idx] = *src_ptr;
+                            float * src_ptr = (float *)((char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]);
+                            dst_row[dst_idx] = *src_ptr;
                        }
                    }
                }
            }
        } // patches handled by this thread
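At this point row (p % patches_per_batch) of tmp holds the zero-padded receptive field of one output position, flattened with kx fastest, then ky, then ic; e.g. with knl_w = knl_h = 3 and c_in = 2, the element (ic = 1, ky = 2, kx = 0) lands at dst_idx = 1*9 + 2*3 + 0 = 15. Because the F32 kernel is asserted contiguous with ne = [KW, KH, IC, OC], each output channel's filter occupies knl_n floats in exactly that order, so patch row and filter line up element-for-element for the GEMM that follows. Two naming quirks in the new code are worth flagging: the inner batch_n shadows the tile counter batch_n of the enclosing loop, and src_x/src_y actually hold the output row (height) and column (width) indices, i.e. the values the deleted code called dy and dx.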
 
-        ggml_barrier(params->threadpool); // wait for all threads
+        ggml_barrier(params->threadpool);
 
-        //GEMM output is patch_n * cout
         float * gemm_output = tmp + patches_per_batch * knl_n;
-
+
         // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out]
         ggml_call_mul_mat(params, patch_n, c_out, knl_n,
                           tmp, knl_data, gemm_output);
-
-        // Barrier to ensure GEMM completes before permutation
+
         ggml_barrier(params->threadpool);
-
-        // Distribute permutation work across threads
+
+
+        //permute back [OC, N, OH, OW] to [N, OC, OH, OW]
         const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth;
         const int64_t permute_start = params->ith * permute_per_thread;
         const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n);
-
-        // Each thread handles part of the permutation from [patch_n, c_out] to WHCN layout
+
         for (int64_t i = permute_start; i < permute_end; ++i) {
-            const int64_t p  = patch_start_batch + i;
-            const int64_t b  = p / (dst_w * dst_h);  // batch index
-            const int64_t dy = (p / dst_w) % dst_h;  // height index
-            const int64_t dx = p % dst_w;            // width index
-
-            // Copy all channels for this spatial position
+            const int64_t p       = patch_start_batch + i;
+            const int64_t batch_n = p / (dst_w * dst_h);
+            const int64_t dst_y   = (p / dst_w) % dst_h;
+            const int64_t dst_x   = p % dst_w;
+
             for (int64_t oc = 0; oc < c_out; ++oc) {
                 const float value = gemm_output[i * c_out + oc];
                 // Write to WHCN layout: dst[w, h, c, n]
-                float * dst_ptr = (float *)((char *)dst_data +
-                    dx * dst->nb[0] + dy * dst->nb[1] + oc * dst->nb[2] + b * dst->nb[3]);
+                float * dst_ptr = (float *)((char *)dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + oc * dst->nb[2] + batch_n * dst->nb[3]);
                 *dst_ptr = value;
             }
         }
     }
 }
 
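The two barriers bracket the tile's matrix multiply: the first guarantees every thread has finished writing its im2col rows into tmp before any thread's slice of the GEMM reads them, and the second guarantees the GEMM output is complete before threads begin scattering it. As the in-code comment states, ggml_call_mul_mat computes output[patch_n, c_out] = patches[patch_n, knl_n] × kernel[knl_n, c_out]; each row i of gemm_output is then written out through dst->nb[] strides to position (dst_x, dst_y, oc, batch_n), which is what places results in ggml's WHCN layout. The new "//permute back" comment describes that scatter loosely; since the writes go through the destination tensor's strides, they are correct for whatever layout dst actually has.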
-static void ggml_compute_forward_conv_2d_f16(
-        const ggml_compute_params * params,
-        const ggml_tensor *         kernel, // [KW, KH, IC, OC]
-        const ggml_tensor *         src,    // [W, H, C, N]
-        ggml_tensor *               dst) {  // [OW, OH, OC, N]
-
-    const int32_t s0 = ggml_get_op_params_i32(dst, 0);
-    const int32_t s1 = ggml_get_op_params_i32(dst, 1);
-    const int32_t p0 = ggml_get_op_params_i32(dst, 2);
-    const int32_t p1 = ggml_get_op_params_i32(dst, 3);
-    const int32_t d0 = ggml_get_op_params_i32(dst, 4);
-    const int32_t d1 = ggml_get_op_params_i32(dst, 5);
-
-    const int64_t OW = dst->ne[0];
-    const int64_t OH = dst->ne[1];
-    const int64_t OC = dst->ne[2];
-    const int64_t N  = dst->ne[3];
-
-    const int64_t IW = src->ne[0];
-    const int64_t IH = src->ne[1];
-    const int64_t IC = src->ne[2];
-
-    const int64_t KW = kernel->ne[0];
-    const int64_t KH = kernel->ne[1];
-
-    const ggml_fp16_t * kernel_data = (const ggml_fp16_t *)kernel->data;
-    const ggml_fp16_t * src_data    = (const ggml_fp16_t *)src->data;
-    ggml_fp16_t *       dst_data    = (ggml_fp16_t *)dst->data;
-
-    const int64_t rows_total      = OH * N;
-    const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth;
-    const int64_t row_start       = params->ith * rows_per_thread;
-    const int64_t row_end         = MIN(row_start + rows_per_thread, rows_total);
-
-    for (int64_t row = row_start; row < row_end; ++row) {
-        const int64_t oh = row % OH;
-        const int64_t n  = row / OH;
-        const ggml_fp16_t * src_batch = src_data + n * IW * IH * IC;
-
-        for (int64_t ow = 0; ow < OW; ++ow) {
-            for (int64_t oc = 0; oc < OC; ++oc) {
-                float sum = 0.0f;
-                const ggml_fp16_t * kernel_channel = kernel_data + oc * KW * KH * IC;
-                for (int64_t kh = 0; kh < KH; ++kh) {
-                    const int64_t ih = oh * s1 - p1 + kh * d1;
-                    if (ih < 0 || ih >= IH) continue;
-
-                    for (int64_t kw = 0; kw < KW; ++kw) {
-                        const int64_t iw = ow * s0 - p0 + kw * d0;
-                        if (iw < 0 || iw >= IW) continue;
-
-                        for (int64_t ic = 0; ic < IC; ++ic) {
-                            const ggml_fp16_t * kernel_ptr = kernel_channel + (kh * KW + kw) + ic * KW * KH;
-                            const ggml_fp16_t * src_ptr    = src_batch + (ih * IW + iw) + ic * IW * IH;
-                            sum += GGML_FP16_TO_FP32(*kernel_ptr) * GGML_FP16_TO_FP32(*src_ptr);
-                        }
-                    }
-                }
-
-                dst_data[((n * OC + oc) * OH + oh) * OW + ow] = GGML_FP32_TO_FP16(sum);
-            }
-        }
-    }
-}
-
 void ggml_compute_forward_conv_2d(
         const ggml_compute_params * params,
         ggml_tensor * dst) {
 
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];
 
-    switch (src0->type) {
-        case GGML_TYPE_F16:
-            {
-                ggml_compute_forward_conv_2d_f16(params, src0, src1, dst);
-            } break;
-        case GGML_TYPE_F32:
-            {
-                ggml_compute_forward_conv_2d_f32(params, dst);
-            } break;
-        default:
-            {
-                GGML_ABORT("fatal error");
-            }
+    if (src0->type == GGML_TYPE_F16) {
+        GGML_ASSERT(false && "F16 not supported yet");
+    } else {
+        ggml_compute_forward_conv_2d_f32(params, src0, src1, dst);
     }
 }
 
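Two things to note about the net effect. First, the dispatcher change is a temporary regression: the previous code handled GGML_TYPE_F16 with its own direct loop, while this commit deletes that helper and asserts on F16 input ("F16 not supported yet"), so only F32 kernels work on this path for now. Second, the overall F32 algorithm is now: slice the output-patch list into workspace-sized tiles, im2col each tile across threads, run one GEMM per tile, then scatter the GEMM rows into the destination tensor. Below is a minimal single-threaded sketch of that same pipeline over plain contiguous row-major arrays; it is illustrative only (the function name, the max_tile parameter, and the naive inner GEMM are not from the commit, which works on strided ggml tensors and threads each stage):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Tiled im2col + GEMM conv2d sketch over contiguous row-major arrays:
    // src[n][ic][h][w], knl[oc][ic][kh][kw], dst[n][oc][oh][ow].
    static void conv2d_tiled_f32(const float * src, const float * knl, float * dst,
                                 int64_t n, int64_t c_in, int64_t src_h, int64_t src_w,
                                 int64_t c_out, int64_t knl_h, int64_t knl_w,
                                 int64_t stride, int64_t pad, int64_t dilation,
                                 int64_t max_tile /* patches per tile */) {
        const int64_t dst_h = (src_h + 2*pad - dilation*(knl_h - 1) - 1) / stride + 1;
        const int64_t dst_w = (src_w + 2*pad - dilation*(knl_w - 1) - 1) / stride + 1;
        const int64_t knl_n = knl_w * knl_h * c_in;       // im2col row length
        const int64_t patch_total = n * dst_h * dst_w;

        std::vector<float> rows(max_tile * knl_n);        // im2col scratch
        std::vector<float> out(max_tile * c_out);         // GEMM output scratch

        for (int64_t start = 0; start < patch_total; start += max_tile) {
            const int64_t tile = std::min(max_tile, patch_total - start);

            // im2col: one scratch row per output position in this tile
            for (int64_t t = 0; t < tile; ++t) {
                const int64_t p  = start + t;
                const int64_t b  = p / (dst_w * dst_h);
                const int64_t dy = (p / dst_w) % dst_h;
                const int64_t dx = p % dst_w;
                float * row = rows.data() + t * knl_n;
                for (int64_t ic = 0; ic < c_in; ++ic)
                for (int64_t ky = 0; ky < knl_h; ++ky)
                for (int64_t kx = 0; kx < knl_w; ++kx) {
                    const int64_t sy = dy * stride + ky * dilation - pad;
                    const int64_t sx = dx * stride + kx * dilation - pad;
                    const bool in_bounds = sy >= 0 && sy < src_h && sx >= 0 && sx < src_w;
                    row[ic * knl_h * knl_w + ky * knl_w + kx] = in_bounds
                        ? src[((b * c_in + ic) * src_h + sy) * src_w + sx] : 0.0f;
                }
            }

            // GEMM: out[tile, c_out] = rows[tile, knl_n] x knl[c_out, knl_n]^T
            // (each filter knl[oc] is knl_n floats in the same kx,ky,ic order)
            for (int64_t t = 0; t < tile; ++t)
            for (int64_t oc = 0; oc < c_out; ++oc) {
                float sum = 0.0f;
                for (int64_t k = 0; k < knl_n; ++k)
                    sum += rows[t * knl_n + k] * knl[oc * knl_n + k];
                out[t * c_out + oc] = sum;
            }

            // scatter GEMM rows back into dst's [n][oc][oh][ow] layout
            for (int64_t t = 0; t < tile; ++t) {
                const int64_t p  = start + t;
                const int64_t b  = p / (dst_w * dst_h);
                const int64_t dy = (p / dst_w) % dst_h;
                const int64_t dx = p % dst_w;
                for (int64_t oc = 0; oc < c_out; ++oc)
                    dst[((b * c_out + oc) * dst_h + dy) * dst_w + dx] = out[t * c_out + oc];
            }
        }
    }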

tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -195,6 +195,7 @@ endif()
 # llama_build_and_test(test-opt.cpp) # SLOW
 llama_build_and_test(test-gguf.cpp)
 llama_build_and_test(test-backend-ops.cpp)
+llama_build_and_test(test_conv2d_comparison.cpp)
 
 llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
 llama_build_and_test(test-autorelease.cpp LABEL "model")
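Assuming llama_build_and_test registers the binary with CTest the same way it does for the neighboring entries, the new comparison test can be run on its own after building with, e.g., ctest -R test_conv2d_comparison from the build directory.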
