Skip to content
This repository was archived by the owner on Apr 28, 2023. It is now read-only.

Commit 044c263

Browse files
Merge pull request #166 from facebookresearch/fix-flags
Add pybindings for gflags
2 parents c04464f + b9ce707 commit 044c263

File tree

10 files changed

+50
-60
lines changed

10 files changed

+50
-60
lines changed

docs/source/framework/pytorch_integration/debugging.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ can use these flags to enable logging. Various types of flags exposed are:
1616
* :code:`debug_tuner`: print debug spew for the tuner multithreading behavior.
1717

1818

19-
In order to use enable these flags, you need to call :code:`tc.GlobalDebugInit`
19+
In order to enable these flags, you need to call :code:`tc.SetDebugFlags`
2020
and set the proper flags to :code:`true`. All of these flags are :code:`boolean`
2121
flags that take values :code:`true` or :code:`false`.
2222

@@ -28,14 +28,14 @@ Example usage
2828
import tensor_comprehensions as tc
2929
import torch
3030
31-
tc.GlobalDebugInit(["--debug_tc_mapper=true", "--debug_lang=false"])
31+
tc.SetDebugFlags(debug_tc_mapper=True, debug_lang=False)
3232
3333
matmul = tc.define(tc.database['matmul']['lang'], name='matmul')
3434
mat1, mat2 = torch.randn(3, 4).cuda(), torch.randn(4, 5).cuda()
3535
out = matmul(mat1, mat2)
3636
3737
In the above example, when the TC executes, we will see the TC mapper information.
38-
You can chose to set any number of flags but the :code:`tc.GlobalDebugInit` should
38+
You can choose to set any number of flags, but :code:`tc.SetDebugFlags` should
3939
only be called once.
4040

4141
Printing TC generated CUDA code
@@ -50,7 +50,7 @@ and the generated CUDA code will be printed on command line.
5050
import tensor_comprehensions as tc
5151
import torch
5252
53-
tc.GlobalDebugInit(["--dump_cuda=true"])
53+
tc.SetDebugFlags(dump_cuda=True)
5454
5555
matmul = tc.define(tc.database['matmul']['lang'], name='matmul')
5656
mat1, mat2 = torch.randn(3, 4).cuda(), torch.randn(4, 5).cuda()

include/tc/core/flags.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -61,10 +61,4 @@ DECLARE_bool(schedule_tree_verbose_validation);
6161
// random seed setting for reproducibility and debugging purposes
6262
uint64_t initRandomSeed();
6363
const uint64_t& randomSeed();
64-
65-
// python
66-
namespace python {
67-
bool globalDebugGflagsGlogInit(int* pargc, char*** pargv);
68-
} // namespace python
69-
7064
} // namespace tc

src/core/flags.cc

Lines changed: 0 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -111,34 +111,6 @@ DEFINE_int64(
111111
-1,
112112
"The number of best candidates to restore from the proto cache");
113113

114-
namespace {
115-
bool parseCommandLineFlags(int* pargc, char*** pargv) {
116-
if (*pargc == 0) {
117-
return true;
118-
}
119-
// TODO: (prigoyal): we need to do some filtering on flags here,
120-
// add option for displaying the help message
121-
return ::gflags::ParseCommandLineFlags(pargc, pargv, true);
122-
}
123-
124-
bool initGoogleLogging(int* pargc, char** argv) {
125-
if (*pargc == 0) {
126-
return true;
127-
}
128-
::google::InitGoogleLogging(argv[0]);
129-
return true;
130-
}
131-
} // namespace
132-
133-
namespace python {
134-
bool globalDebugGflagsGlogInit(int* pargc, char*** pargv) {
135-
bool success = true;
136-
success &= parseCommandLineFlags(pargc, pargv);
137-
success &= initGoogleLogging(pargc, *pargv);
138-
return success;
139-
}
140-
} // namespace python
141-
142114
uint64_t initRandomSeed() {
143115
static std::mutex mut;
144116
static bool inited = false;

tensor_comprehensions/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
from tensor_comprehensions.tc_unit import TcUnit
1919
from tensor_comprehensions.tc_unit import TcAutotuner
2020
from tensor_comprehensions.tc_unit import TcCompilationUnit
21-
from tensor_comprehensions.tc_unit import GlobalDebugInit
21+
from tensor_comprehensions.tc_unit import SetDebugFlags
2222
from tensor_comprehensions.tc_unit import autotuner_settings
2323
from tensor_comprehensions.tc_unit import small_sizes_autotuner_settings
2424
from tensor_comprehensions.tc_unit import ATenCompilationUnit
@@ -27,6 +27,6 @@
2727

2828
__all__ = [
2929
'define', 'TcUnit', 'TcAutotuner', 'TcCompilationUnit', 'autotuner_settings',
30-
'small_sizes_autotuner_settings', 'GlobalDebugInit', 'ATenCompilationUnit',
30+
'small_sizes_autotuner_settings', 'SetDebugFlags', 'ATenCompilationUnit',
3131
'Options', 'database', 'decode',
3232
]

tensor_comprehensions/pybinds/pybind_engine.cc

Lines changed: 19 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -39,21 +39,26 @@ namespace py = pybind11;
3939
using ATenCudaCompilationUnit = tc::ATenCompilationUnit<tc::CudaTcExecutor>;
4040

4141
PYBIND11_MODULE(tc, m) {
42+
m.def("set_logtostderr", [](bool logtostderr) {
43+
FLAGS_logtostderr = logtostderr;
44+
});
45+
m.def("set_debug_lang", [](bool debug_lang) {
46+
tc::FLAGS_debug_lang = debug_lang;
47+
});
48+
m.def("set_debug_halide", [](bool debug_halide) {
49+
tc::FLAGS_debug_halide = debug_halide;
50+
});
51+
m.def("set_debug_tc_mapper", [](bool debug_tc_mapper) {
52+
tc::FLAGS_debug_tc_mapper = debug_tc_mapper;
53+
});
54+
m.def("set_debug_cuda", [](bool debug_cuda) {
55+
tc::FLAGS_debug_cuda = debug_cuda;
56+
});
57+
m.def("set_debug_tuner", [](bool debug_tuner) {
58+
tc::FLAGS_debug_tuner = debug_tuner;
59+
});
4260
m.def(
43-
"global_debug_init", // exposing the debugging flags to people
44-
[](std::vector<std::string> args) {
45-
if (args.size() > 0) {
46-
args.insert(args.begin(), "tc");
47-
}
48-
int numArgs = args.size();
49-
// now we construct a char** argv type from args
50-
std::vector<char*> vargs; // char* vector args
51-
for (auto& arg : args) {
52-
vargs.push_back(const_cast<char*>(arg.data()));
53-
}
54-
char** argv = vargs.data();
55-
tc::python::globalDebugGflagsGlogInit(&numArgs, &argv);
56-
});
61+
"set_dump_cuda", [](bool dump_cuda) { tc::FLAGS_dump_cuda = dump_cuda; });
5762

5863
py::object dlpack;
5964
try {

tensor_comprehensions/tc_unit.py

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
from torch.autograd import Variable
2020

2121
from tensor_comprehensions.tc import ATenCompilationUnit
22-
from tensor_comprehensions.tc import global_debug_init as GlobalDebugInit
22+
from tensor_comprehensions.tc import set_logtostderr, set_debug_lang, set_debug_halide, set_debug_tc_mapper, set_debug_cuda, set_debug_tuner, set_dump_cuda
2323
from tensor_comprehensions.torch_tc.tc_function import TCFunction, unpack_variables, get_tensors, make_contiguous
2424
from tensor_comprehensions.autotuner import ATenAutotuner
2525
from tensor_comprehensions.mapping_options import Options
@@ -38,6 +38,26 @@
3838
"threads": 32, "generations": 5, "tuner_min_launch_total_threads": 1,
3939
}
4040

41+
###############################################################################
42+
# Set global debugging flags
43+
###############################################################################
44+
class SetDebugFlags(object):
45+
def __init__(self, **kwargs):
46+
self.set_gflags(**kwargs)
47+
48+
def set_gflags(
49+
self, debug_lang=False, debug_halide=False, debug_tc_mapper=False,
50+
debug_cuda=False, debug_tuner=False, dump_cuda=False, **kwargs
51+
):
52+
set_logtostderr(True)
53+
set_debug_lang(debug_lang)
54+
set_debug_halide(debug_halide)
55+
set_debug_tc_mapper(debug_tc_mapper)
56+
set_debug_cuda(debug_cuda)
57+
set_debug_tuner(debug_tuner)
58+
set_dump_cuda(dump_cuda)
59+
60+
4161
###############################################################################
4262
# Some helper functions
4363
###############################################################################

test_python/layers/test_dump_cuda.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,11 @@
1616
import tensor_comprehensions as tc
1717

1818
import torch
19-
import torch.cuda
2019
import unittest
2120

2221
# enable this to dump cuda code generated whenever tc layer runs: simple run or
2322
# autotuner run
24-
tc.GlobalDebugInit(["--dump_cuda=true"])
23+
tc.SetDebugFlags(dump_cuda=True)
2524

2625

2726
class TestDumpCuda(unittest.TestCase):

test_python/layers/test_layernorm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
import torch.cuda
2020
import unittest
2121

22-
tc.GlobalDebugInit(["--debug_tuner=false", "--debug_tc_mapper=false"])
22+
tc.SetDebugFlags(debug_tuner=False, debug_tc_mapper=False)
2323

2424

2525
class TestLayerNorm(unittest.TestCase):

test_python/test_debug_init.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
import torch.cuda
2020
import tensor_comprehensions as tc
2121

22-
tc.GlobalDebugInit(["--dump_cuda=true", "--debug_tc_mapper=false"])
22+
tc.SetDebugFlags(dump_cuda=True, debug_tc_mapper=True)
2323

2424

2525
class TestDebugInit(unittest.TestCase):

test_python/test_tc_torch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from tensor_comprehensions.mapping_options import Options
2525
from common import TestCase, run_tests
2626

27-
tc.GlobalDebugInit(["--dump_cuda=false"])
27+
tc.SetDebugFlags(dump_cuda=False)
2828

2929

3030
MATMUL_LANG = """

0 commit comments

Comments
 (0)