Skip to content

Commit 7fa9c69

Browse files
authored
Fix compile issues (#2063)
init
1 parent a61e302 commit 7fa9c69

File tree

5 files changed

+8
-8
lines changed

5 files changed

+8
-8
lines changed

torchao/experimental/kernels/cpu/aarch64/benchmarks/benchmark_bitpacking.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ template <
2727
typename pack_8_values_fn_type,
2828
typename vec_pack_64_values_fn_type,
2929
typename vec_pack_128_values_fn_type>
30-
TORCHAO_ALWAYS_INLINE void pack_uint_odd_bit_values(
30+
TORCHAO_ALWAYS_INLINE inline void pack_uint_odd_bit_values(
3131
pack_8_values_fn_type pack_8_values_func,
3232
vec_pack_64_values_fn_type vec_pack_64_values_func,
3333
vec_pack_128_values_fn_type vec_pack_128_values_func,
@@ -94,7 +94,7 @@ template <
9494
typename unpack_8_values_fn_type,
9595
typename vec_unpack_64_values_fn_type,
9696
typename vec_unpack_128_values_fn_type>
97-
TORCHAO_ALWAYS_INLINE void unpack_uint_odd_bit_values(
97+
TORCHAO_ALWAYS_INLINE inline void unpack_uint_odd_bit_values(
9898
unpack_8_values_fn_type unpack_8_values_func,
9999
vec_unpack_64_values_fn_type vec_unpack_64_values_func,
100100
vec_unpack_128_values_fn_type vec_unpack_128_values_func,

torchao/experimental/kernels/cpu/aarch64/matmul/channelwise_8bit_a_channelwise_8bit_b_1x16x16_f32_smlal-impl.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ Possibly better to transpose 16x16 of b and use dotprod. Left for future.
4545
*/
4646

4747
template <int lane>
48-
TORCHAO_ALWAYS_INLINE void block_mul_1x16x1(
48+
TORCHAO_ALWAYS_INLINE inline void block_mul_1x16x1(
4949
const int16x4_t& a_vec,
5050
const int8x16_t& b_vec,
5151
const int8x16_t& b_zero_point_vec,
@@ -129,7 +129,7 @@ void block_mul_1x16x16(
129129
vget_high_s16(a_vec_high), b_vec, b_zero_point_vec, partial_sums);
130130
}
131131

132-
TORCHAO_ALWAYS_INLINE void dequantize_1x16_int32_t(
132+
TORCHAO_ALWAYS_INLINE inline void dequantize_1x16_int32_t(
133133
const int32x4_t (&sums)[4],
134134
const float* lhs_scales,
135135
const float* rhs_scales,

torchao/experimental/kernels/cpu/aarch64/matmul/fp32_a_input_channelwise_8bit_b_1x16x4_f32_impl.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ For each int8x16_t of b:
4040
- By doing the above 4 times (lane=[0-3]), we used all values along k dim of a
4141
and accumulated 4 float32x4_t values
4242
*/
43-
TORCHAO_ALWAYS_INLINE void block_mul_1x16x1(
43+
TORCHAO_ALWAYS_INLINE inline void block_mul_1x16x1(
4444
const float32_t a,
4545
const int8x16_t& b_vec,
4646
const int8_t b_zero_point,

torchao/experimental/kernels/cpu/aarch64/matmul/fp32_a_input_channelwise_8bit_b_4x16x4_f32_impl.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ For each int8x16_t of b:
4040
- By doing the above 4 times (lane=[0-3]), we used all values along k dim of a
4141
and accumulated 4 float32x4_t values
4242
*/
43-
TORCHAO_ALWAYS_INLINE void block_mul_4x16x1(
43+
TORCHAO_ALWAYS_INLINE inline void block_mul_4x16x1(
4444
const float32x4_t& a,
4545
const int8x16_t& b_vec,
4646
const int8_t b_zero_point,
@@ -82,7 +82,7 @@ TORCHAO_ALWAYS_INLINE void block_mul_4x16x1(
8282
partial_sums[3][3] = vfmaq_n_f32(partial_sums[3][3], b_vec_high_high, a[3]);
8383
}
8484

85-
TORCHAO_ALWAYS_INLINE void block_mul_4x16x4(
85+
TORCHAO_ALWAYS_INLINE inline void block_mul_4x16x4(
8686
const float32_t* a,
8787
const size_t lda,
8888
const int8_t* b,

torchao/experimental/kernels/cpu/aarch64/matmul/matmul_utils.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ void transpose_4x4(
7171
const size_t lda,
7272
float32x4_t (&tranposed)[4]);
7373

74-
TORCHAO_ALWAYS_INLINE void transpose_4x4(
74+
TORCHAO_ALWAYS_INLINE inline void transpose_4x4(
7575
const float32_t* a,
7676
const size_t lda,
7777
float32x4_t (&tranposed)[4]) {

0 commit comments

Comments (0)