@@ -44,7 +44,6 @@
 from qonnx.transformation.infer_datatypes import InferDataTypes
 from qonnx.transformation.infer_shapes import InferShapes
 
-
 def compute_bops_and_macs(inf_cost_dict):
     total_bops = 0.0
     total_macs = 0.0
@@ -57,7 +56,6 @@ def compute_bops_and_macs(inf_cost_dict):
             total_macs += v
     return total_bops, total_macs
 
-
 def compute_mem_bits_and_elems(inf_cost_dict, filter_string="mem_w"):
     total_mem_bits = 0.0
     total_mem_elems = 0.0
@@ -98,6 +96,7 @@ def inference_cost(
         datatype inference and constant folding. Strongly recommended.
     :param discount_sparsity: If set, will discount op cost of MAC ops with a
         constant zero weight, and the mem cost of constant zero weights."""
+
     combined_results = {}
     if isinstance(model_filename_or_wrapper, ModelWrapper):
         model = model_filename_or_wrapper
@@ -118,7 +117,8 @@ def inference_cost(
     model = model.transform(GiveReadableTensorNames())
     if output_onnx is not None:
         model.save(output_onnx)
-    ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity, cost_breakdown))
+    ret = model.analysis(lambda x: infca.inference_cost(x, discount_sparsity,
+                                                        cost_breakdown))
     for i, res in ret.items():
         if i == "total_cost":
             bops, macs = compute_bops_and_macs(res)
@@ -148,10 +148,9 @@ def inference_cost(
                per_node_breakdown[node_name] = node_res
            combined_results[i] = per_node_breakdown
    return combined_results
-
+
 def main():
     clize.run(inference_cost)
 
-
 if __name__ == "__main__":
     main()
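For context (not part of the commit), a minimal usage sketch of the inference_cost utility changed above. Only the names visible in the diff (model_filename_or_wrapper, discount_sparsity, cost_breakdown, output_onnx, the returned combined_results dict) are taken from the source; the import path and result-key names below are assumptions about the qonnx package layout.

# Minimal sketch, not the definitive API; import path and result keys are assumed.
from qonnx.util.inference_cost import inference_cost

# "model.onnx" is a placeholder for a QONNX-exported model file.
costs = inference_cost(
    "model.onnx",
    discount_sparsity=True,  # per the docstring: skip op/mem cost of constant-zero weights
    cost_breakdown=True,     # also request per-node and per-optype breakdowns
)
# compute_bops_and_macs() folds the per-datatype MAC entries of the total-cost
# dict into aggregate BOPS and MAC counts before the results are returned.
print(costs["total_cost"])

main() exposes the same function on the command line through clize; the name of the installed console script is not shown in this diff, so check the package's entry points for the exact command.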