
Commit 1dfda07

fix linting
1 parent be9a9f8 commit 1dfda07


3 files changed: +17 -10 lines changed


src/qonnx/core/datatype.py

Lines changed: 2 additions & 0 deletions
@@ -144,6 +144,7 @@ def to_numpy_dt(self):
     def get_canonical_name(self):
         return "FLOAT32"
 
+
 class Float16Type(BaseDataType):
     def bitwidth(self):
         return 16
@@ -175,6 +176,7 @@ def to_numpy_dt(self):
     def get_canonical_name(self):
         return "FLOAT16"
 
+
 class IntType(BaseDataType):
     def __init__(self, bitwidth, signed):
         super().__init__()
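
For reference, a minimal sketch of what the two float datatypes touched here expose, assuming the DataType lookup in qonnx.core.datatype resolves the name "FLOAT16" to the Float16Type shown above:

from qonnx.core.datatype import DataType

# Assumption: DataType["FLOAT16"] resolves to the Float16Type from the diff above.
flt16 = DataType["FLOAT16"]
print(flt16.bitwidth())            # 16
print(flt16.get_canonical_name())  # "FLOAT16"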

src/qonnx/util/inference_cost.py

Lines changed: 9 additions & 4 deletions
@@ -44,6 +44,7 @@
 from qonnx.transformation.infer_datatypes import InferDataTypes
 from qonnx.transformation.infer_shapes import InferShapes
 
+
 def compute_bops_and_macs(inf_cost_dict):
     total_bops = 0.0
     total_macs = 0.0
@@ -56,6 +57,7 @@ def compute_bops_and_macs(inf_cost_dict):
             total_macs += v
     return total_bops, total_macs
 
+
 def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"):
     total_mem_bits = 0.0
     total_mem_elems = 0.0
@@ -67,6 +69,7 @@ def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"):
             total_mem_elems += v
     return total_mem_bits, total_mem_elems
 
+
 def assign_mem_bits_and_elems(res_dict):
     mem_w_bits, mem_w_elems = compute_mem_bits_and_elems(res_dict, "mem_w")
     mem_o_bits, mem_o_elems = compute_mem_bits_and_elems(res_dict, "mem_o")
@@ -76,6 +79,7 @@ def assign_mem_bits_and_elems(res_dict):
     res_dict["total_mem_o_elems"] = mem_o_elems
     return res_dict
 
+
 def inference_cost(
     model_filename_or_wrapper,
     *,
@@ -96,7 +100,7 @@ def inference_cost(
         datatype inference and constant folding. Strongly recommended.
     :param discount_sparsity: If set, will discount op cost of MAC ops with a
         constant zero weight, and the mem cost of constant zero weights."""
-
+
     combined_results = {}
     if isinstance(model_filename_or_wrapper, ModelWrapper):
         model = model_filename_or_wrapper
@@ -117,8 +121,7 @@ def inference_cost(
     model = model.transform(GiveReadableTensorNames())
     if output_onnx is not None:
         model.save(output_onnx)
-    ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity,
-                                                        cost_breakdown))
+    ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, cost_breakdown))
     for i, res in ret.items():
         if i == "total_cost":
             bops, macs = compute_bops_and_macs(res)
@@ -148,9 +151,11 @@ def inference_cost(
                 per_node_breakdown[node_name] = node_res
             combined_results[i] = per_node_breakdown
     return combined_results
-
+
+
 def main():
     clize.run(inference_cost)
 
+
 if __name__ == "__main__":
     main()
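
As context for the reformatted analysis call, a hedged usage sketch of this utility; "model.onnx" is a placeholder path, and the exact keys of the returned dict follow the "total_cost" handling shown in the hunks above:

from qonnx.util.inference_cost import inference_cost

# "model.onnx" stands in for any cleaned-up QONNX/ONNX model file.
costs = inference_cost("model.onnx", discount_sparsity=True, cost_breakdown=False)

# The "total_cost" entry is aggregated via compute_bops_and_macs() and
# assign_mem_bits_and_elems() above; the exact cost keys depend on the model.
print(costs["total_cost"])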

tests/analysis/test_matmul_mac_cost.py

Lines changed: 6 additions & 6 deletions
@@ -27,19 +27,19 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-import pytest
-import qonnx
 from pkgutil import get_data
+
 import qonnx.util.inference_cost as infc
-from qonnx.util.cleanup import cleanup_model
 from qonnx.core.modelwrapper import ModelWrapper
+from qonnx.util.cleanup import cleanup_model
 
 
 def test_matmul_mac_cost():
-    raw_model = get_data("qonnx","data/onnx/matmul_update/sdp.onnx")
+    raw_model = get_data("qonnx", "data/onnx/matmul_update/sdp.onnx")
     model = ModelWrapper(raw_model)
     cleaned_model = cleanup_model(model)
-    # Two Matmul layers with shape (i_shape, w_shape, o_shape), L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32])
+    # Two Matmul layers with shape (i_shape, w_shape, o_shape),
+    # L1: ([4, 64, 32], [4, 32, 64], [4, 64, 64]) and L2: ([4, 64, 64], [4, 64, 32], [4, 64, 32])
     inf_cost_dict = infc.inference_cost(cleaned_model, discount_sparsity=False)
-    mac_cost = inf_cost_dict['op_mac_FLOAT32_FLOAT32']  # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576
+    mac_cost = inf_cost_dict["op_mac_FLOAT32_FLOAT32"]  # Expected mac cost 4*32*64*64 + 4*64*64*32 = 1048576
     assert mac_cost == 1048576.0, "Error: discrepancy in mac cost."
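
The expected value in the test follows from counting multiply-accumulates for a batched matmul: an input of shape (B, M, K) times a weight of shape (B, K, N) needs B * M * K * N MACs. A quick arithmetic check of the comment above:

# L1: (4, 64, 32) x (4, 32, 64) -> 4 * 64 * 32 * 64 = 524288 MACs
# L2: (4, 64, 64) x (4, 64, 32) -> 4 * 64 * 64 * 32 = 524288 MACs
assert 4 * 64 * 32 * 64 + 4 * 64 * 64 * 32 == 1048576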
