|
1 |
| -#include "unary-ops.h" |
| 1 | +#include "unary-ops.inc" |
2 | 2 |
|
3 | 3 | static inline float op_abs(float x) {
|
4 | 4 | return fabsf(x);
|
@@ -52,75 +52,10 @@ static inline float op_sqrt(float x) {
|
52 | 52 | return sqrtf(x);
|
53 | 53 | }
|
54 | 54 |
|
55 |
// Sine of x; thin wrapper so sinf can be passed as a float(*)(float) template argument.
static inline float op_sin(float x) {
    const float result = sinf(x);
    return result;
}
58 |
| - |
59 |
// Cosine of x; thin wrapper so cosf can be passed as a float(*)(float) template argument.
static inline float op_cos(float x) {
    const float result = cosf(x);
    return result;
}
62 |
| - |
// Natural logarithm of x; thin wrapper so logf can be passed as a
// float(*)(float) template argument.
static inline float op_log(float x) {
    const float result = logf(x);
    return result;
}
|
66 | 58 |
|
67 |
| -template <float (*op)(float), typename src0_t, typename dst_t> |
68 |
| -static inline void vec_unary_op(int64_t n, dst_t * y, const src0_t * x) { |
69 |
| - constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32; |
70 |
| - constexpr auto f32_to_dst = type_conversion_table<dst_t >::from_f32; |
71 |
| - |
72 |
| - for (int i = 0; i < n; i++) { |
73 |
| - y[i] = f32_to_dst(op(src0_to_f32(x[i]))); |
74 |
| - } |
75 |
| -} |
76 |
| - |
77 |
| -template <float (*op)(float), typename src0_t, typename dst_t> |
78 |
| -static void apply_unary_op(const ggml_compute_params * params, ggml_tensor * dst) { |
79 |
| - const ggml_tensor * src0 = dst->src[0]; |
80 |
| - |
81 |
| - GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst)); |
82 |
| - |
83 |
| - GGML_TENSOR_UNARY_OP_LOCALS |
84 |
| - |
85 |
| - GGML_ASSERT( nb0 == sizeof(dst_t)); |
86 |
| - GGML_ASSERT(nb00 == sizeof(src0_t)); |
87 |
| - |
88 |
| - const auto [ir0, ir1] = get_thread_range(params, src0); |
89 |
| - |
90 |
| - for (int64_t ir = ir0; ir < ir1; ++ir) { |
91 |
| - const int64_t i03 = ir/(ne02*ne01); |
92 |
| - const int64_t i02 = (ir - i03*ne02*ne01)/ne01; |
93 |
| - const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01); |
94 |
| - |
95 |
| - dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 ); |
96 |
| - const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01); |
97 |
| - |
98 |
| - vec_unary_op<op>(ne0, dst_ptr, src0_ptr); |
99 |
| - } |
100 |
| -} |
101 |
| - |
102 |
| -// TODO: Use the 'traits' lookup table (for type conversion fns), instead of a mass of 'if' conditions with long templates |
103 |
| -template <float (*op)(float)> |
104 |
| -static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) { |
105 |
| - const ggml_tensor * src0 = dst->src[0]; |
106 |
| - |
107 |
| - /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32 |
108 |
| - apply_unary_op<op, float, float>(params, dst); |
109 |
| - } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16 |
110 |
| - apply_unary_op<op, ggml_fp16_t, ggml_fp16_t>(params, dst); |
111 |
| - } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16 |
112 |
| - apply_unary_op<op, ggml_bf16_t, ggml_bf16_t>(params, dst); |
113 |
| - } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) { |
114 |
| - apply_unary_op<op, ggml_bf16_t, float>(params, dst); |
115 |
| - } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) { |
116 |
| - apply_unary_op<op, ggml_fp16_t, float>(params, dst); |
117 |
| - } else { |
118 |
| - fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__, |
119 |
| - ggml_type_name(dst->type), ggml_type_name(src0->type)); |
120 |
| - GGML_ABORT("fatal error"); |
121 |
| - } |
122 |
| -} |
123 |
| - |
// Forward op entry point: dst[i] = |src0[i]| (via op_abs), type-dispatched by unary_op.
void ggml_compute_forward_abs(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_abs>(params, dst);
}
|
@@ -173,14 +108,6 @@ void ggml_compute_forward_sqrt(const ggml_compute_params * params, ggml_tensor *
|
173 | 108 | unary_op<op_sqrt>(params, dst);
|
174 | 109 | }
|
175 | 110 |
|
176 |
// Forward op entry point: dst[i] = sin(src0[i]) (via op_sin), type-dispatched by unary_op.
void ggml_compute_forward_sin(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_sin>(params, dst);
}
179 |
| - |
180 |
// Forward op entry point: dst[i] = cos(src0[i]) (via op_cos), type-dispatched by unary_op.
void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_cos>(params, dst);
}
183 |
| - |
// Forward op entry point: dst[i] = log(src0[i]) (via op_log), type-dispatched by unary_op.
void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * dst) {
    unary_op<op_log>(params, dst);
}
|
0 commit comments