
Use static functions/variables if possible (#4423) #4427

Open
wants to merge 1 commit into base: main
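This PR marks file-local functions and variables static so they get internal linkage and their symbols stay confined to one translation unit. A minimal sketch of the idea (an anonymous namespace achieves the same thing); the names are made up and the code is not taken from this diff:

// helper.cc -- illustrative only, not part of this PR
static int square(int x) { // internal linkage: visible only in this translation unit
  return x * x;
}

namespace { // anonymous namespace: same effect for everything declared inside
int cube(int x) {
  return x * x * x;
}
} // namespace

int useHelpers() { // external linkage: still callable from other translation units
  return square(3) + cube(2);
}

The misc-use-internal-linkage clang-tidy check enabled below flags declarations that could be confined to their translation unit this way but are not.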
2 changes: 2 additions & 0 deletions .clang-tidy
@@ -4,8 +4,10 @@
# Get options for config files in parent directories,
# but override them if there's a conflict.
InheritParentConfig: true
+# NOLINT(clang-tidy-config-check-not-enabled)
Checks: '
bugprone-argument-comment,
+misc-use-internal-linkage,
'
CheckOptions:
- key: facebook-cuda-safe-api-call-check.HandlerName
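If a particular declaration has to keep external linkage (for example because another translation unit references it through an extern declaration), the standard clang-tidy NOLINT comment can silence the new check at that one site. A hypothetical example, not taken from this PR:

// exported_helper is a made-up name; illustrative only
void exported_helper() { // NOLINT(misc-use-internal-linkage)
  // intentionally kept with external linkage
}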
10 changes: 5 additions & 5 deletions bench/BenchUtils.cc
@@ -19,22 +19,22 @@

namespace fbgemm {

-std::default_random_engine eng;
+static std::default_random_engine eng;

template <typename T>
-void randFill(aligned_vector<T>& vec, T low, T high, std::true_type) {
+static void randFill(aligned_vector<T>& vec, T low, T high, std::true_type) {
std::uniform_int_distribution<int> dis(low, high);
std::generate(vec.begin(), vec.end(), [&] { return dis(eng); });
}

template <typename T>
-void randFill(aligned_vector<T>& vec, T low, T high, std::false_type) {
+static void randFill(aligned_vector<T>& vec, T low, T high, std::false_type) {
std::uniform_real_distribution<T> dis(low, high);
std::generate(vec.begin(), vec.end(), [&] { return dis(eng); });
}

template <typename T>
-void randFill(aligned_vector<T>& vec, T low, T high) {
+static void randFill(aligned_vector<T>& vec, T low, T high) {
randFill(vec, low, high, std::is_integral<T>());
}

@@ -165,7 +165,7 @@ aligned_vector<float> getRandomSparseVector(
}

template <typename T>
-aligned_vector<T> getRandomBlockSparseMatrix(
+static aligned_vector<T> getRandomBlockSparseMatrix(
int Rows,
int Cols,
float fractionNonZerosBlocks,
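The same treatment applies to the function templates above (randFill, getRandomBlockSparseMatrix): declaring a function template static gives every instantiation internal linkage. A small illustrative sketch with a made-up name, not from this diff:

#include <vector>

template <typename T>
static T sumAll(const std::vector<T>& v) { // each instantiation is local to this .cc file
  T total{};
  for (const T& x : v) {
    total += x;
  }
  return total;
}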
10 changes: 5 additions & 5 deletions bench/ConvUnifiedBenchmark.cc
@@ -28,7 +28,7 @@ using namespace fbgemm;

// clang-format off
// 1D conv shapes
-vector<conv_param_t<1>> shapes_1d = {
+static vector<conv_param_t<1>> shapes_1d = {
// MB, IC, OC, IW, G, KW, stride_w, pad_w_left, pad_w_right,
// (dilation, output_padding_w, tranpose)
// regular
@@ -46,7 +46,7 @@ vector<conv_param_t<1>> shapes_1d = {
};

// 2D conv shapes
-vector<conv_param_t<2>> shapes_2d = {
+static vector<conv_param_t<2>> shapes_2d = {
// MB, IC, OC, IH, IW, G, KH, KW, stride_h, stride_w,
// pad_h_top, pad_w_left, pad_h_bottom, pad_w_right,
// (dilation_h, dilation_w, output_padding_h, output_padding_w, tranpose)
@@ -84,7 +84,7 @@ vector<conv_param_t<2>> shapes_2d = {
{1, 1}, {0, 0, 0, 0})
};

-vector<conv_param_t<2>> shapes_2d_resnext_101 = {
+static vector<conv_param_t<2>> shapes_2d_resnext_101 = {
// ResNext-101 (unique shapes only)
// conv_param_t<>(N, C, M, H, W, groups, /* kern */ {KH, KW}, /* stride */
// {stride_h, stride_w}, /* padding pad_l = pad_h */ {pad_l, pad_l, pad_l, pad_l}, /* dialation */
@@ -143,7 +143,7 @@ vector<conv_param_t<2>> shapes_2d_resnext_101 = {
};

// 3D conv shapes
-vector<conv_param_t<3>> shapes_3d = {
+static vector<conv_param_t<3>> shapes_3d = {
// MB, IC, OC, {IT, IH, IW}, G, {KT, KH, KW}, {stride_t, stride_h,
// stride_w},
// {pad_prev, pad_h_top, pad_w_left, pad_next, pad_h_bottom, pad_w_right},
@@ -216,7 +216,7 @@ vector<conv_param_t<3>> shapes_3d = {
// clang-format on

template <int SPATIAL_DIM, typename Acc_t>
-void performance_test(
+static void performance_test(
const vector<conv_param_t<SPATIAL_DIM>>& shapes,
bool flush,
int repetitions) {
2 changes: 1 addition & 1 deletion bench/ConvertBenchmark.cc
@@ -21,7 +21,7 @@
using namespace std;
using namespace fbgemm;

-void performance_test() {
+static void performance_test() {
constexpr int NWARMUP = 4;
constexpr int NITER = 256;

2 changes: 1 addition & 1 deletion bench/EmbeddingIndexRemappingBenchmark.cc
@@ -37,7 +37,7 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-int run_benchmark(
+static int run_benchmark(
int batch_size,
int num_rows,
int average_len,
2 changes: 1 addition & 1 deletion bench/EmbeddingQuantizeBenchmark.cc
@@ -25,7 +25,7 @@ using namespace fbgemm;

// T is the type of scale and bias
template <typename T>
-void performance_test() {
+static void performance_test() {
constexpr int NWARMUP = 4;
constexpr int NITER = 256;

2 changes: 1 addition & 1 deletion bench/EmbeddingQuantizeFloatToFloatOrHalfBenchmark.cc
@@ -25,7 +25,7 @@ using namespace fbgemm;

// T is the type of scale and bias
template <typename T>
-void performance_test() {
+static void performance_test() {
constexpr int NWARMUP = 4;
constexpr int NITER = 256;

15 changes: 8 additions & 7 deletions bench/EmbeddingSpMDM8BitBenchmark.cc
@@ -30,15 +30,16 @@
using namespace std;
using namespace fbgemm;

-void print_fused_table(int rows, int embedding_dim, const uint8_t* table) {
-for (int i = 0; i < rows; i++) {
-cout << "row: " << i << " : " << endl;
-for (int ii = 0; ii < embedding_dim; ii++) {
-cout << (int)table[i * (embedding_dim + 2 * sizeof(float)) + ii] << ",";
+/*
+static void print_fused_table(int rows, int embedding_dim, const uint8_t* table)
+{ for (int i = 0; i < rows; i++) { cout << "row: " << i << " : " << endl; for
+(int ii = 0; ii < embedding_dim; ii++) { cout << (int)table[i * (embedding_dim +
+2 * sizeof(float)) + ii] << ",";
}
cout << endl;
}
}
+*/

static vector<vector<int>> GetInputs_() {
vector<vector<int>> input_dims = {
@@ -58,10 +59,10 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-vector<double> benchmarkTimes;
+static vector<double> benchmarkTimes;

template <typename OutType>
-int run_benchmark(
+static int run_benchmark(
int batch_size,
int num_rows,
int embedding_dim,
2 changes: 1 addition & 1 deletion bench/EmbeddingSpMDMBenchmark.cc
@@ -49,7 +49,7 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-void run_benchmark(
+static void run_benchmark(
int batch_size,
int num_rows,
int embedding_dim,
15 changes: 2 additions & 13 deletions bench/EmbeddingSpMDMNBit2Benchmark.cc
@@ -171,17 +171,6 @@ static void print_benchmark_results() {
}
}

-void print_fused_table(int rows, int embedding_dim, const uint8_t* table) {
-for (int i = 0; i < rows; i++) {
-std::cout << "row: " << i << " : " << std::endl;
-for (int ii = 0; ii < embedding_dim; ii++) {
-std::cout << (int)table[i * (embedding_dim + 2 * sizeof(float)) + ii]
-<< ",";
-}
-std::cout << std::endl;
-}
-}

static vector<vector<int>> GetInputs_() {
vector<vector<int>> input_dims = {
// batch size, number of rows of table, emb dim , avg lengthl
@@ -200,7 +189,7 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-int run_benchmark(
+static int run_benchmark(
int bit_rate,
int batch_size,
int num_rows,
@@ -488,7 +477,7 @@ int run_benchmark(
return 0;
}

-void sweep_benchmark(KernelType kern_type) {
+static void sweep_benchmark(KernelType kern_type) {
int batch_size;
int num_rows;
int embedding_dim;
13 changes: 7 additions & 6 deletions bench/EmbeddingSpMDMNBitBenchmark.cc
@@ -32,16 +32,17 @@
using namespace std;
using namespace fbgemm;

-void print_fused_table(int rows, int embedding_dim, const uint8_t* table) {
-for (int i = 0; i < rows; i++) {
-std::cout << "row: " << i << " : " << std::endl;
-for (int ii = 0; ii < embedding_dim; ii++) {
-std::cout << (int)table[i * (embedding_dim + 2 * sizeof(float)) + ii]
+/*
+static void print_fused_table(int rows, int embedding_dim, const uint8_t* table)
+{ for (int i = 0; i < rows; i++) { std::cout << "row: " << i << " : " <<
+std::endl; for (int ii = 0; ii < embedding_dim; ii++) { std::cout <<
+(int)table[i * (embedding_dim + 2 * sizeof(float)) + ii]
<< ",";
}
std::cout << std::endl;
}
}
+*/

static vector<vector<int>> GetInputs_() {
vector<vector<int>> input_dims = {
@@ -62,7 +63,7 @@ static vector<vector<int>> GetInputs_() {
}

template <typename OutType>
-int run_benchmark(
+static int run_benchmark(
int bit_rate,
int batch_size,
int num_rows,
13 changes: 7 additions & 6 deletions bench/EmbeddingSpMDMNBitRowWiseSparseBenchmark.cc
@@ -31,16 +31,17 @@
using namespace std;
using namespace fbgemm;

-void print_fused_table(int rows, int embedding_dim, const uint8_t* table) {
-for (int i = 0; i < rows; i++) {
-std::cout << "row: " << i << " : " << std::endl;
-for (int ii = 0; ii < embedding_dim; ii++) {
-std::cout << (int)table[i * (embedding_dim + 2 * sizeof(float)) + ii]
+/*
+static void print_fused_table(int rows, int embedding_dim, const uint8_t* table)
+{ for (int i = 0; i < rows; i++) { std::cout << "row: " << i << " : " <<
+std::endl; for (int ii = 0; ii < embedding_dim; ii++) { std::cout <<
+(int)table[i * (embedding_dim + 2 * sizeof(float)) + ii]
<< ",";
}
std::cout << std::endl;
}
}
+*/

static vector<vector<int>> GetInputs_() {
vector<vector<int>> input_dims = {
@@ -60,7 +61,7 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-int run_benchmark(
+static int run_benchmark(
int bit_rate,
int batch_size,
int num_rows,
7 changes: 2 additions & 5 deletions bench/GEMMsBenchmark.cc
@@ -29,11 +29,8 @@
using namespace std;
using namespace fbgemm;

-void performance_test(
-const int M,
-const int N,
-const int K,
-const bool timebreak) {
+static void
+performance_test(const int M, const int N, const int K, const bool timebreak) {
// clang-format off
const vector<vector<int>> shapes = {
// NOTE: clang-format wants to use a different formatting but the current
2 changes: 1 addition & 1 deletion bench/GEMMsTunableBenchmark.cc
@@ -27,7 +27,7 @@
using namespace std;
using namespace fbgemm;

-void performance_test(
+static void performance_test(
const BlockingFactors* tuning_params,
set<vector<int>>& incorrect_configs,
const vector<int>& shape,
2 changes: 1 addition & 1 deletion bench/GroupwiseConvRequantizeBenchmark.cc
@@ -25,7 +25,7 @@
using namespace std;
using namespace fbgemm;

-void performance_test() {
+static void performance_test() {
// clang-format off
const vector<conv_param_t<>> shapes = {
// MB, IC, OC, {IH, IW}, G, {KH, KW}, {stride_h, stride_w}, pad_t, pad_l,
2 changes: 1 addition & 1 deletion bench/Im2ColFusedRequantizeBenchmark.cc
@@ -26,7 +26,7 @@ using namespace std;
using namespace fbgemm;

template <typename Acc_t>
-void performance_test() {
+static void performance_test() {
vector<conv_param_t<>> shapes = {
// MB, IC, OC, IH, IW, G, KH, KW, stride_h, stride_w,
// pad_h_top, pad_w_left, pad_h_bottom, pad_w_right
2 changes: 1 addition & 1 deletion bench/PackedFloatInOutBenchmark.cc
@@ -29,7 +29,7 @@
using namespace std;
using namespace fbgemm;

-void performance_test() {
+static void performance_test() {
// clang-format off
const vector<vector<int>> shapes = {
// NOTE: clang-format wants to use a different formatting but the current
2 changes: 1 addition & 1 deletion bench/PackedRequantizeAcc16Benchmark.cc
@@ -38,7 +38,7 @@ enum class BenchmarkType {
EVERYTHING, // row-offset in input packing, and requantization + spmdm
};

-void performance_test() {
+static void performance_test() {
// clang-format off
vector<vector<int>> shapes = {
// NOTE: clang-format wants to use a different formatting but the current
2 changes: 1 addition & 1 deletion bench/PackedRequantizeAcc32Benchmark.cc
@@ -29,7 +29,7 @@
using namespace std;
using namespace fbgemm;

-void performance_test() {
+static void performance_test() {
// clang-format off
vector<vector<int>> shapes = {
// NOTE: clang-format wants to use a different formatting but the current
2 changes: 1 addition & 1 deletion bench/RequantizeBenchmark.cc
@@ -29,7 +29,7 @@ enum class BenchmarkType {
PER_CHANNEL,
};

-void performance_test() {
+static void performance_test() {
constexpr int NWARMUP = 4;
constexpr int NITER = 256;

2 changes: 1 addition & 1 deletion bench/RowOffsetBenchmark.cc
@@ -22,7 +22,7 @@
using namespace std;
using namespace fbgemm;

-void performance_test() {
+static void performance_test() {
constexpr int NWARMUP = 4;
constexpr int NITER = 256;

4 changes: 2 additions & 2 deletions bench/RowwiseAdagradBenchmark.cc
@@ -36,9 +36,9 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-vector<int> prefetch_distances{16};
+static vector<int> prefetch_distances{16};

-void run_benchmark(
+static void run_benchmark(
const int num_rows, // number of rows reading
const int block_size, // number of parameters per row
const uint64_t param_size, // total number of parameters
2 changes: 1 addition & 1 deletion bench/RowwiseAdagradFusedBenchmark.cc
@@ -42,7 +42,7 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-void run_benchmark(
+static void run_benchmark(
int batch_size,
int num_rows,
int embedding_dim,
2 changes: 1 addition & 1 deletion bench/SparseAdagradBenchmark.cc
@@ -40,7 +40,7 @@ static vector<vector<int>> GetInputs_() {
return input_dims;
}

-void run_benchmark(
+static void run_benchmark(
const int num_rows, // number of rows reading
const int block_size, // number of parameters per row
const uint64_t param_size, // total number of parameters
2 changes: 1 addition & 1 deletion bench/TransposeBenchmark.cc
@@ -21,7 +21,7 @@ using namespace std;
using namespace fbgemm;

template <typename T>
-void performance_test() {
+static void performance_test() {
constexpr int NWARMUP = 4;
constexpr int NITER = 256;
