Commit e3783fd

ggml-cpu : Add GGML_CPU_FFAST_MATH for sine autovectorization
1 parent 06b715f

File tree: 5 files changed, +87 −74 lines changed

CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -107,6 +107,7 @@ message(DEBUG "INS_ENB : ${INS_ENB}")
 option(GGML_CPU_HBM        "ggml: use memkind for CPU HBM"                         OFF)
 option(GGML_CPU_AARCH64    "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON)
 option(GGML_CPU_KLEIDIAI   "ggml: use KleidiAI optimized kernels if applicable"    OFF)
+option(GGML_CPU_FFAST_MATH "ggml: use approximate math"                            OFF)
 option(GGML_SSE42          "ggml: enable SSE 4.2"                                  ${INS_ENB})
 option(GGML_AVX            "ggml: enable AVX"                                      ${INS_ENB})
 option(GGML_AVX_VNNI       "ggml: enable AVX-VNNI"                                 OFF)
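The new option defaults to OFF, so strict libm results remain the out-of-the-box behavior; opting into the approximate, vectorizable path is a configure-time choice, e.g. `cmake -B build -DGGML_CPU_FFAST_MATH=ON` (standard CMake option syntax; the build directory name is only an example).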

src/ggml-cpu/CMakeLists.txt

Lines changed: 6 additions & 0 deletions
@@ -28,6 +28,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         ggml-cpu/binary-ops.cpp
         ggml-cpu/unary-ops.h
         ggml-cpu/unary-ops.cpp
+        ggml-cpu/unary-ops.inc
+        ggml-cpu/unary-ops-ffast-math.cpp
         ggml-cpu/simd-mappings.h
         ggml-cpu/vec.h
         ggml-cpu/vec.cpp
@@ -64,6 +66,10 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         endif()
     endif()
 
+    if (GGML_CPU_FFAST_MATH AND NOT MSVC)
+        set_source_files_properties(ggml-cpu/unary-ops-ffast-math.cpp PROPERTIES COMPILE_FLAGS "-ffast-math $<$<CONFIG:RelWithDebInfo>:-O3>")
+    endif()
+
     if (GGML_LLAMAFILE)
         target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_LLAMAFILE)
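Two details in the build logic are worth noting. `set_source_files_properties` scopes `-ffast-math` to the single translation unit that implements sin/cos, so every other file in the backend keeps strict IEEE semantics. The generator expression `$<$<CONFIG:RelWithDebInfo>:-O3>` additionally raises that one file from RelWithDebInfo's default `-O2` to `-O3`, since the autovectorizer may not fire at lower optimization levels. The `NOT MSVC` guard reflects that `-ffast-math` is a GCC/Clang flag; MSVC's closest equivalent is `/fp:fast`.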

src/ggml-cpu/unary-ops-ffast-math.cpp

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+#include "unary-ops.inc"
+
+// This file is compiled with -ffast-math only if GGML_CPU_FFAST_MATH is enabled.
+// libmvec lets sine/cosine autovectorize, but the results are not bit-identical to libm's.
+// Other backends (e.g. CUDA) aren't bit-identical either, but more people expect the CPU backend to be.
+
+static inline float op_sin(float x) {
+    return sinf(x);
+}
+
+static inline float op_cos(float x) {
+    return cosf(x);
+}
+
+void ggml_compute_forward_sin(const ggml_compute_params * params, ggml_tensor * dst) {
+    unary_op<op_sin>(params, dst);
+}
+
+void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor * dst) {
+    unary_op<op_cos>(params, dst);
+}
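For context on why the per-file flag enables vectorization at all: with glibc on x86-64, `-ffast-math` defines `__FAST_MATH__`, which causes `math.h` to declare `sinf`/`cosf` with SIMD attributes, letting GCC's autovectorizer replace per-element calls with libmvec's vector variants (e.g. `_ZGVbN4v_sinf`). A minimal standalone sketch of the effect, separate from the commit itself:

    // vec_sin_demo.cpp — standalone sketch, not part of the commit.
    // With GCC on x86-64 Linux/glibc, building as
    //     g++ -O3 -ffast-math vec_sin_demo.cpp
    // typically turns the loop below into calls to libmvec's _ZGVbN4v_sinf;
    // without -ffast-math it remains a scalar sinf loop (bit-identical to libm).
    #include <cmath>
    #include <cstdio>

    // Same shape as the hot loop in vec_unary_op: y[i] = op(x[i]).
    void sin_array(float * y, const float * x, int n) {
        for (int i = 0; i < n; i++) {
            y[i] = sinf(x[i]);
        }
    }

    int main() {
        float x[8], y[8];
        for (int i = 0; i < 8; i++) {
            x[i] = 0.1f * i;
        }
        sin_array(y, x, 8);
        printf("sin(%f) ~= %f\n", x[7], y[7]);
        return 0;
    }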

src/ggml-cpu/unary-ops.cpp

Lines changed: 1 addition & 74 deletions
@@ -1,4 +1,4 @@
-#include "unary-ops.h"
+#include "unary-ops.inc"
 
 static inline float op_abs(float x) {
     return fabsf(x);
@@ -52,75 +52,10 @@ static inline float op_sqrt(float x) {
     return sqrtf(x);
 }
 
-static inline float op_sin(float x) {
-    return sinf(x);
-}
-
-static inline float op_cos(float x) {
-    return cosf(x);
-}
-
 static inline float op_log(float x) {
     return logf(x);
 }
 
-template <float (*op)(float), typename src0_t, typename dst_t>
-static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) {
-    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
-    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;
-
-    for (int i = 0; i < n; i++) {
-        y[i] = f32_to_dst(op(src0_to_f32(x[i])));
-    }
-}
-
-template <float (*op)(float), typename src0_t, typename dst_t>
-static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
-    const ggml_tensor * src0 = dst->src[0];
-
-    GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst));
-
-    GGML_TENSOR_UNARY_OP_LOCALS
-
-    GGML_ASSERT( nb0 == sizeof(dst_t));
-    GGML_ASSERT(nb00 == sizeof(src0_t));
-
-    const auto [ir0, ir1] = get_thread_range(params, src0);
-
-    for (int64_t ir = ir0; ir < ir1; ++ir) {
-        const int64_t i03 = ir/(ne02*ne01);
-        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
-        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
-
-        dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
-        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
-
-        vec_unary_op<op>(ne0, dst_ptr, src0_ptr);
-    }
-}
-
-// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
-template <float (*op)(float)>
-static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
-    const ggml_tensor * src0 = dst->src[0];
-
-    /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32
-        apply_unary_op<op, float, float>(params, dst);
-    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16
-        apply_unary_op<op, ggml_fp16_t, ggml_fp16_t>(params, dst);
-    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
-        apply_unary_op<op, ggml_bf16_t, ggml_bf16_t>(params, dst);
-    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) {
-        apply_unary_op<op, ggml_bf16_t, float>(params, dst);
-    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
-        apply_unary_op<op, ggml_fp16_t, float>(params, dst);
-    } else {
-        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__,
-            ggml_type_name(dst->type), ggml_type_name(src0->type));
-        GGML_ABORT("fatal error");
-    }
-}
-
 void ggml_compute_forward_abs(const ggml_compute_params * params, ggml_tensor * dst) {
     unary_op<op_abs>(params, dst);
 }
@@ -173,14 +108,6 @@ void ggml_compute_forward_sqrt(const ggml_compute_params * params, ggml_tensor * dst) {
     unary_op<op_sqrt>(params, dst);
 }
 
-void ggml_compute_forward_sin(const ggml_compute_params * params, ggml_tensor * dst) {
-    unary_op<op_sin>(params, dst);
-}
-
-void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor * dst) {
-    unary_op<op_cos>(params, dst);
-}
-
 void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * dst) {
     unary_op<op_log>(params, dst);
 }
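The deletion above is the core of the refactor: the shared template machinery moves verbatim into unary-ops.inc (shown next), which is textually included by two translation units, one compiled with default flags and one with -ffast-math. Because everything in the .inc has internal linkage (`static`), each translation unit instantiates its own copies and the two builds cannot collide at link time. A stripped-down sketch of the pattern, with hypothetical file names and the three files concatenated for brevity:

    #include <cmath>

    // ops.inc (hypothetical name) — internal-linkage template shared by both TUs.
    template <float (*op)(float)>
    static void map1(float * y, const float * x, int n) {
        for (int i = 0; i < n; i++) {
            y[i] = op(x[i]);  // each including TU stamps out its own copy
        }
    }

    // ops.cpp — compiled with default flags: exact libm results.
    static float op_log1(float x) { return logf(x); }
    void forward_log1(float * y, const float * x, int n) { map1<op_log1>(y, x, n); }

    // ops-fast.cpp — compiled with -ffast-math: the sin loop may autovectorize.
    static float op_sin1(float x) { return sinf(x); }
    void forward_sin1(float * y, const float * x, int n) { map1<op_sin1>(y, x, n); }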

src/ggml-cpu/unary-ops.inc

Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
+#include "unary-ops.h"
+
+template <float (*op)(float), typename src0_t, typename dst_t>
+static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) {
+    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
+    constexpr auto f32_to_dst  = type_conversion_table<dst_t >::from_f32;
+
+    for (int i = 0; i < n; i++) {
+        y[i] = f32_to_dst(op(src0_to_f32(x[i])));
+    }
+}
+
+template <float (*op)(float), typename src0_t, typename dst_t>
+static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst));
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT( nb0 == sizeof(dst_t));
+    GGML_ASSERT(nb00 == sizeof(src0_t));
+
+    const auto [ir0, ir1] = get_thread_range(params, src0);
+
+    for (int64_t ir = ir0; ir < ir1; ++ir) {
+        const int64_t i03 = ir/(ne02*ne01);
+        const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
+        const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
+
+        dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
+        const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
+
+        vec_unary_op<op>(ne0, dst_ptr, src0_ptr);
+    }
+}
+
+// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates
+template <float (*op)(float)>
+static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32
+        apply_unary_op<op, float, float>(params, dst);
+    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16
+        apply_unary_op<op, ggml_fp16_t, ggml_fp16_t>(params, dst);
+    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
+        apply_unary_op<op, ggml_bf16_t, ggml_bf16_t>(params, dst);
+    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) {
+        apply_unary_op<op, ggml_bf16_t, float>(params, dst);
+    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
+        apply_unary_op<op, ggml_fp16_t, float>(params, dst);
+    } else {
+        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__,
+            ggml_type_name(dst->type), ggml_type_name(src0->type));
+        GGML_ABORT("fatal error");
+    }
+}
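Two pieces of the shared code above deserve a gloss. The row loop flattens the three outer tensor dimensions into one index `ir` in [0, ne01*ne02*ne03) and unflattens it back into (i03, i02, i01), and `get_thread_range` hands each thread a contiguous slice of that row space. A sketch of both ideas under the usual ggml convention of `ith`/`nth` thread parameters — this is an illustration, not the actual `get_thread_range` implementation:

    #include <algorithm>
    #include <cstdint>
    #include <utility>

    // Hypothetical stand-in for get_thread_range: divide nr rows into nth
    // near-equal contiguous chunks; thread ith gets rows [ir0, ir1).
    static std::pair<int64_t, int64_t> thread_range(int ith, int nth, int64_t nr) {
        const int64_t dr  = (nr + nth - 1) / nth;    // rows per thread, rounded up
        const int64_t ir0 = std::min(dr * ith, nr);  // clamp for trailing threads
        const int64_t ir1 = std::min(ir0 + dr, nr);
        return {ir0, ir1};
    }

    // Unflattening a row index, mirroring the loop in apply_unary_op:
    // ir = (i03*ne02 + i02)*ne01 + i01.
    static void unflatten(int64_t ir, int64_t ne01, int64_t ne02,
                          int64_t * i03, int64_t * i02, int64_t * i01) {
        *i03 = ir / (ne02*ne01);
        *i02 = (ir - *i03*ne02*ne01) / ne01;
        *i01 =  ir - *i03*ne02*ne01 - *i02*ne01;
    }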
