
Commit 3d50cc0

STY: style fix 4/23/2021 (#685)
1 parent b12034a commit 3d50cc0

6 files changed: +46 -46 lines changed

dpnp/backend/include/dpnp_iface.hpp

Lines changed: 20 additions & 19 deletions
@@ -233,15 +233,15 @@ INP_DLLEXPORT void dpnp_elemwise_absolute_c(void* array1_in, void* result1, size
 */
template <typename _DataType_output, typename _DataType_input1, typename _DataType_input2>
INP_DLLEXPORT void dpnp_dot_c(void* result_out,
-const void* input1_in,
-const size_t input1_size,
-const size_t* input1_shape,
-const size_t input1_shape_ndim,
-const void* input2_in,
-const size_t input2_size,
-const size_t* input2_shape,
-const size_t input2_shape_ndim,
-const size_t* where);
+const void* input1_in,
+const size_t input1_size,
+const size_t* input1_shape,
+const size_t input1_shape_ndim,
+const void* input2_in,
+const size_t input2_size,
+const size_t* input2_shape,
+const size_t input2_shape_ndim,
+const size_t* where);

/**
 * @ingroup BACKEND_API

@@ -336,7 +336,8 @@ INP_DLLEXPORT void dpnp_sum_c(void* result_out,
 * @param [in] ndim Number of elements in shape.
 */
template <typename _DataType>
-INP_DLLEXPORT void dpnp_partition_c(void* array, void* array2, void* result, const size_t kth, const size_t* shape, const size_t ndim);
+INP_DLLEXPORT void
+dpnp_partition_c(void* array, void* array2, void* result, const size_t kth, const size_t* shape, const size_t ndim);

/**
 * @ingroup BACKEND_API

@@ -485,15 +486,15 @@ INP_DLLEXPORT void dpnp_cholesky_c(void* array1_in, void* result1, const size_t
 */
template <typename _DataType_output, typename _DataType_input1, typename _DataType_input2>
INP_DLLEXPORT void dpnp_correlate_c(void* result_out,
-const void* input1_in,
-const size_t input1_size,
-const size_t* input1_shape,
-const size_t input1_shape_ndim,
-const void* input2_in,
-const size_t input2_size,
-const size_t* input2_shape,
-const size_t input2_shape_ndim,
-const size_t* where);
+const void* input1_in,
+const size_t input1_size,
+const size_t* input1_shape,
+const size_t input1_shape_ndim,
+const void* input2_in,
+const size_t input2_size,
+const size_t* input2_shape,
+const size_t input2_shape_ndim,
+const size_t* where);

/**
 * @ingroup BACKEND_API

dpnp/backend/kernels/dpnp_krnl_common.cpp

Lines changed: 0 additions & 1 deletion
@@ -86,7 +86,6 @@ void dpnp_dot_c(void* result_out,
const size_t input2_shape_ndim,
const size_t* where)
{
-
(void)input1_shape;
(void)input1_shape_ndim;
(void)input2_size;

dpnp/backend/kernels/dpnp_krnl_elemwise.cpp

Lines changed: 2 additions & 2 deletions
@@ -370,8 +370,8 @@ static void func_map_init_elemwise_1arg_1type(func_map_t& fmap)
_DataType_input2* input2_data = reinterpret_cast<_DataType_input2*>(const_cast<void*>(input2_in)); \
_DataType_output* result = reinterpret_cast<_DataType_output*>(result_out); \
\
-std::vector<size_t> result_shape = get_result_shape(input1_shape, input1_shape_ndim, \
-input2_shape, input2_shape_ndim); \
+std::vector<size_t> result_shape = \
+get_result_shape(input1_shape, input1_shape_ndim, input2_shape, input2_shape_ndim); \
\
DPNPC_id<_DataType_input1>* input1_it; \
const size_t input1_it_size_in_bytes = sizeof(DPNPC_id<_DataType_input1>); \

dpnp/backend/kernels/dpnp_krnl_sorting.cpp

Lines changed: 10 additions & 10 deletions
@@ -87,7 +87,8 @@ template <typename _DataType>
class dpnp_partition_c_kernel;

template <typename _DataType>
-void dpnp_partition_c(void* array1_in, void* array2_in, void* result1, const size_t kth, const size_t* shape_, const size_t ndim)
+void dpnp_partition_c(
+void* array1_in, void* array2_in, void* result1, const size_t kth, const size_t* shape_, const size_t ndim)
{
_DataType* arr = reinterpret_cast<_DataType*>(array1_in);
_DataType* arr2 = reinterpret_cast<_DataType*>(array2_in);

@@ -109,7 +110,7 @@ void dpnp_partition_c(void* array1_in, void* array2_in, void* result1, const siz
size *= shape_[i];
}

-size_t size_ = size/shape_[ndim-1];
+size_t size_ = size / shape_[ndim - 1];

if (size_ == 0)
{

@@ -121,29 +122,29 @@ void dpnp_partition_c(void* array1_in, void* array2_in, void* result1, const siz

for (size_t i = 0; i < size_; ++i)
{
-size_t ind_begin = i * shape_[ndim-1];
-size_t ind_end = (i + 1) * shape_[ndim-1] - 1;
+size_t ind_begin = i * shape_[ndim - 1];
+size_t ind_end = (i + 1) * shape_[ndim - 1] - 1;

-_DataType matrix[shape_[ndim-1]];
+_DataType matrix[shape_[ndim - 1]];
for (size_t j = ind_begin; j < ind_end + 1; ++j)
{
size_t ind = j - ind_begin;
matrix[ind] = arr2[j];
}
-std::partial_sort(matrix, matrix + shape_[ndim-1], matrix + shape_[ndim-1]);
+std::partial_sort(matrix, matrix + shape_[ndim - 1], matrix + shape_[ndim - 1]);
for (size_t j = ind_begin; j < ind_end + 1; ++j)
{
size_t ind = j - ind_begin;
arr2[j] = matrix[ind];
}
}

-size_t* shape = reinterpret_cast<size_t*>(dpnp_memory_alloc_c(ndim * sizeof(size_t)));
+size_t* shape = reinterpret_cast<size_t*>(dpnp_memory_alloc_c(ndim * sizeof(size_t)));
auto memcpy_event = DPNP_QUEUE.memcpy(shape, shape_, ndim * sizeof(size_t));

memcpy_event.wait();

-cl::sycl::range<2> gws(size_, kth+1);
+cl::sycl::range<2> gws(size_, kth + 1);
auto kernel_parallel_for_func = [=](cl::sycl::id<2> global_id) {
size_t j = global_id[0];
size_t k = global_id[1];

@@ -160,7 +161,6 @@ void dpnp_partition_c(void* array1_in, void* array2_in, void* result1, const siz
result[j * shape[ndim - 1] + i] = change_val2;
}
}
-
};

auto kernel_func = [&](cl::sycl::handler& cgh) {

@@ -172,7 +172,7 @@ void dpnp_partition_c(void* array1_in, void* array2_in, void* result1, const siz

event.wait();

-dpnp_memory_free_c(shape);
+dpnp_memory_free_c(shape);
}

template <typename _DataType>
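
For context on the kernel being restyled above: dpnp_partition_c treats the flattened input as size_ = size / shape_[ndim - 1] rows of length shape_[ndim - 1], copies each row into a temporary buffer, fully sorts it with std::partial_sort, and writes it back before the SYCL kernel rearranges elements around the kth position. A minimal host-only sketch of that per-row step (plain C++, no SYCL; the helper name partial_sort_rows is made up for illustration):

#include <algorithm>
#include <cstddef>
#include <vector>

// Host-only sketch: sort each row of a flattened (rows x row_len) buffer,
// mirroring the per-row std::partial_sort loop in dpnp_partition_c above.
// partial_sort_rows is a hypothetical name, not part of the dpnp backend.
template <typename T>
void partial_sort_rows(std::vector<T>& data, std::size_t row_len)
{
    const std::size_t rows = data.size() / row_len;
    std::vector<T> row(row_len); // temporary buffer, like `matrix` in the kernel

    for (std::size_t i = 0; i < rows; ++i)
    {
        const std::size_t begin = i * row_len;
        std::copy(data.begin() + begin, data.begin() + begin + row_len, row.begin());
        // middle == end, so the whole row is sorted, as in the kernel
        std::partial_sort(row.begin(), row.end(), row.end());
        std::copy(row.begin(), row.end(), data.begin() + begin);
    }
}

For example, partial_sort_rows(v, 4) on an 8-element vector sorts its first and last four elements independently.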

dpnp/backend/src/dpnp_utils.hpp

Lines changed: 6 additions & 6 deletions
@@ -142,8 +142,7 @@ size_t get_id_by_xyz_inkernel(const _DataType* xyz, size_t xyz_size, const _Data
 *
 * @return Input shape is broadcastable to output one or not.
 */
-static inline bool
-broadcastable(const std::vector<size_t>& input_shape, const std::vector<size_t>& output_shape)
+static inline bool broadcastable(const std::vector<size_t>& input_shape, const std::vector<size_t>& output_shape)
{
if (input_shape.size() > output_shape.size())
{

@@ -173,7 +172,7 @@ static inline bool
/**
 * @ingroup BACKEND_UTILS
 * @brief Get common shape based on input shapes.
- *
+ *
 * Example:
 *   Input1 shape A[8, 1, 6, 1]
 *   Input2 shape B[7, 1, 5]

@@ -187,9 +186,10 @@ static inline bool
 * @exception std::domain_error Input shapes are not broadcastable.
 * @return Common shape.
 */
-static inline std::vector<size_t>
-get_result_shape(const size_t* input1_shape, const size_t input1_shape_size,
-const size_t* input2_shape, const size_t input2_shape_size)
+static inline std::vector<size_t> get_result_shape(const size_t* input1_shape,
+const size_t input1_shape_size,
+const size_t* input2_shape,
+const size_t input2_shape_size)
{
const size_t result_shape_size = (input2_shape_size > input1_shape_size) ? input2_shape_size : input1_shape_size;
std::vector<size_t> result_shape;
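
The get_result_shape declaration re-wrapped above computes a NumPy-style common shape: the two shapes are aligned at their trailing dimensions, each output dimension is the larger of the pair, size-1 axes broadcast, and incompatible pairs raise std::domain_error, as the header comment states. A rough stand-alone sketch of that rule, written against std::vector instead of the backend's raw-pointer signature (an illustration, not the dpnp implementation):

#include <algorithm>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Sketch of NumPy-style broadcast shape computation, matching the example in
// the dpnp_utils.hpp comment: A[8, 1, 6, 1] and B[7, 1, 5] -> [8, 7, 6, 5].
std::vector<std::size_t> broadcast_shape(const std::vector<std::size_t>& a,
                                         const std::vector<std::size_t>& b)
{
    const std::size_t ndim = std::max(a.size(), b.size());
    std::vector<std::size_t> result(ndim, 1);

    for (std::size_t i = 0; i < ndim; ++i)
    {
        // walk from the trailing dimension of each shape; missing axes count as 1
        const std::size_t da = (i < a.size()) ? a[a.size() - 1 - i] : 1;
        const std::size_t db = (i < b.size()) ? b[b.size() - 1 - i] : 1;
        if (da != db && da != 1 && db != 1)
        {
            throw std::domain_error("shapes are not broadcastable");
        }
        result[ndim - 1 - i] = std::max(da, db);
    }
    return result;
}

For the documented example, broadcast_shape({8, 1, 6, 1}, {7, 1, 5}) yields {8, 7, 6, 5}.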

dpnp/backend/tests/test_broadcast_iterator.cpp

Lines changed: 8 additions & 8 deletions
@@ -123,7 +123,7 @@ TEST_P(IteratorBroadcasting, sycl_broadcast)
 * input = np.arange(1, input_size + 1, dtype=np.int64).reshape(input_shape)
 * print(f"input shape={input.shape}")
 * print(f"input:\n{input}\n")
- *
+ *
 * output_shape = [2, 3, 4]
 * output = np.ones(output_shape, dtype=np.int64)
 * print(f"output shape={output.shape}")

@@ -143,11 +143,11 @@ INSTANTIATE_TEST_SUITE_P(
IteratorParameters{{4}, {3, 4}, {1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}},
IteratorParameters{{4}, {2, 3, 4}, {1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}},
IteratorParameters{{3, 4}, {3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}},
-IteratorParameters{{3, 4}, {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}},
-IteratorParameters{{2, 3, 4}, {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+IteratorParameters{
+{3, 4}, {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}},
+IteratorParameters{{2, 3, 4}, {2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}},
-IteratorParameters{{2, 3, 1}, {2, 3, 4}, {1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,
-4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6}},
-IteratorParameters{{2, 1, 4}, {2, 3, 4}, {1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4,
-5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8}}));
+IteratorParameters{
+{2, 3, 1}, {2, 3, 4}, {1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6}},
+IteratorParameters{
+{2, 1, 4}, {2, 3, 4}, {1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8}}));
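
Each reflowed IteratorParameters entry above pairs an input shape, an output shape, and the flat values expected after broadcasting (the same values the NumPy snippet quoted in the test comment would print). As a quick illustration of how such a reference sequence can be reproduced for the {2, 3, 1} -> {2, 3, 4} case, here is a small stand-alone sketch that is not part of the test suite:

#include <cstddef>
#include <iostream>
#include <vector>

// Stand-alone sketch: broadcast a row-major input of shape {2, 3, 1} to
// {2, 3, 4} by mapping each output index back to an input index (size-1 axes
// repeat). For input values 1..6 this prints the same flat sequence as the
// {2, 3, 1} -> {2, 3, 4} IteratorParameters entry above.
int main()
{
    const std::vector<std::size_t> in_shape{2, 3, 1};
    const std::vector<std::size_t> out_shape{2, 3, 4};
    const std::vector<long> input{1, 2, 3, 4, 5, 6}; // np.arange(1, 7).reshape(2, 3, 1)

    for (std::size_t i = 0; i < out_shape[0]; ++i)
    {
        for (std::size_t j = 0; j < out_shape[1]; ++j)
        {
            for (std::size_t k = 0; k < out_shape[2]; ++k)
            {
                // clamp each index to 0 on axes where the input size is 1
                const std::size_t ii = (in_shape[0] == 1) ? 0 : i;
                const std::size_t jj = (in_shape[1] == 1) ? 0 : j;
                const std::size_t kk = (in_shape[2] == 1) ? 0 : k;
                const std::size_t flat = (ii * in_shape[1] + jj) * in_shape[2] + kk;
                std::cout << input[flat] << ' ';
            }
        }
    }
    std::cout << '\n'; // expected: 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 5 5 5 5 6 6 6 6
    return 0;
}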
