Commit abd3423

Browse files
committed
format with ruff
1 parent cd11ae7 commit abd3423

113 files changed (+671, -673 lines)

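All of the hunks below are mechanical rewrites by ruff's formatter. A minimal sketch of the dominant change (a hypothetical snippet, not a file from this commit; it assumes the repository configures ruff with quote-style = "single", which this diff does not show):

# Before running `ruff format`:
html_logo = "img/hls4ml_logo_navbar.png"

# After `ruff format` with quote-style = "single" (assumed config): string
# literals are normalized to single quotes, while docstrings stay
# double-quoted per the PEP 257 convention, as the docstring hunks below show.
html_logo = 'img/hls4ml_logo_navbar.png'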

docs/conf.py

Lines changed: 2 additions & 2 deletions
@@ -99,7 +99,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 
 # -- Extension configuration -------------------------------------------------
 html_show_sourcelink = False
-html_logo = "img/hls4ml_logo_navbar.png"
+html_logo = 'img/hls4ml_logo_navbar.png'
 
 html_theme_options = {
     'canonical_url': '',
@@ -120,7 +120,7 @@ def get_pypi_version(package, url_pattern=URL_PATTERN):
 html_context = {
     'display_github': True,  # Integrate GitHub
     'github_user': 'fastmachinelearning',  # Username
-    'github_repo': "hls4ml",  # Repo name
+    'github_repo': 'hls4ml',  # Repo name
     'github_version': 'main',  # Version
     'conf_py_path': '/docs/',  # Path in the checkout to the docs root
 }

hls4ml/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -1,11 +1,11 @@
-from hls4ml import converters, report, utils  # noqa: F401, E402
+from hls4ml import converters, report, utils
 
 try:
     from ._version import version as __version__
     from ._version import version_tuple
 except ImportError:
-    __version__ = "unknown version"
-    version_tuple = (0, 0, "unknown version")
+    __version__ = 'unknown version'
+    version_tuple = (0, 0, 'unknown version')
 
 
 def reseed(newseed):
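For context on the dropped markers here and in hls4ml/backends/__init__.py below: a trailing # noqa: F401 comment silences the "imported but unused" warning on deliberate re-exports. Removing it presumably relies on a per-file ignore for __init__.py in the project's ruff configuration, which is not part of this diff. A hypothetical illustration of the marker's effect:

# Hypothetical package __init__, not from the repo: the name is re-exported
# for users of the package and never used in this module itself, so the
# unused-import warning (F401) is suppressed rather than fixed.
from json import dumps  # noqa: F401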

hls4ml/backends/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -1,11 +1,11 @@
-from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend  # noqa: F401
-from hls4ml.backends.fpga.fpga_backend import FPGABackend  # noqa: F401
+from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend
+from hls4ml.backends.fpga.fpga_backend import FPGABackend
 from hls4ml.backends.oneapi.oneapi_backend import OneAPIBackend
 from hls4ml.backends.quartus.quartus_backend import QuartusBackend
 from hls4ml.backends.symbolic.symbolic_backend import SymbolicExpressionBackend
 from hls4ml.backends.vivado.vivado_backend import VivadoBackend
 from hls4ml.backends.vivado_accelerator.vivado_accelerator_backend import VivadoAcceleratorBackend
-from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig  # noqa: F401
+from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig
 
 from hls4ml.backends.catapult.catapult_backend import CatapultBackend  # isort: skip
hls4ml/backends/catapult/catapult_backend.py

Lines changed: 4 additions & 4 deletions
@@ -251,7 +251,7 @@ def build(
         ccs_args = f'"reset={reset} csim={csim} synth={synth} cosim={cosim} validation={validation}'
         ccs_args += f' export={export} vsynth={vsynth} fifo_opt={fifo_opt} bitfile={bitfile} ran_frame={ran_frame}'
         ccs_args += f' sw_opt={sw_opt} power={power} da={da} vhdl={vhdl} verilog={verilog} bup={bup}"'
-        ccs_invoke = catapult_exe + ' -product ultra -shell -f build_prj.tcl -eval \'set ::argv ' + ccs_args + '\''
+        ccs_invoke = catapult_exe + " -product ultra -shell -f build_prj.tcl -eval 'set ::argv " + ccs_args + "'"
         print(ccs_invoke)
         os.system(ccs_invoke)
         os.chdir(curr_dir)
@@ -455,9 +455,9 @@ def init_global_pooling2d(self, layer):
     @layer_optimizer(Softmax)
     def init_softmax(self, layer):
         if layer.model.config.get_config_value('IOType') == 'io_parallel':
-            assert (
-                len(layer.get_input_variable().shape) == 1
-            ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            assert len(layer.get_input_variable().shape) == 1, (
+                'Softmax with io_parallel strategy cannot be used on multidimensional tensors.'
+            )
 
     @layer_optimizer(Embedding)
     def init_embed(self, layer):
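The ccs_invoke rewrite above only swaps which quote characters delimit the Python literals; the command string passed to os.system is unchanged. A standalone check (with placeholder values for catapult_exe and ccs_args, which the real build method assembles from its arguments):

# Both spellings build the identical shell command; only the source-level
# quoting differs, so ruff's formatter may rewrite one into the other freely.
catapult_exe = 'catapult'  # placeholder executable path
ccs_args = '"reset=True csim=True synth=True"'  # placeholder arguments

old = catapult_exe + ' -product ultra -shell -f build_prj.tcl -eval \'set ::argv ' + ccs_args + '\''
new = catapult_exe + " -product ultra -shell -f build_prj.tcl -eval 'set ::argv " + ccs_args + "'"
assert old == new  # same command either way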

hls4ml/backends/catapult/passes/broadcast_stream.py

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 
 
 class Broadcast(Layer):
-    '''Inserted between layers for broadcasting.'''
+    """Inserted between layers for broadcasting."""
 
     def initialize(self):
         shape = self.attributes['target_shape']

hls4ml/backends/catapult/passes/conv_stream.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 
 
 class GenerateConvStreamingInstructions(OptimizerPass):
-    '''Generates the instructions for streaming implementation of CNNs'''
+    """Generates the instructions for streaming implementation of CNNs"""
 
     def match(self, node):
         is_match = (

hls4ml/backends/catapult/passes/convolution_winograd.py

Lines changed: 2 additions & 2 deletions
@@ -7,10 +7,10 @@
 
 
 class ApplyWinogradKernelTransformation(OptimizerPass):
-    '''
+    """
     Transforms the weights of a Conv2D kernel to a format suitable for Winograd convolution
     For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks
-    '''
+    """
 
     def match(self, node):
         node_matches = isinstance(node, (Conv1D, Conv2D))

hls4ml/backends/catapult/passes/fifo_depth_optimization.py

Lines changed: 2 additions & 2 deletions
@@ -82,9 +82,9 @@ def transform(self, model):
 
         if len(data['children']) == 0:
             print(
-                "FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible."
+                'FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible.'
             )
-            print("Consider increasing profiling_fifo_depth.")
+            print('Consider increasing profiling_fifo_depth.')
             return False
 
         n_elem = len(data['children'][0]['children'][0]['children'])

hls4ml/backends/catapult/passes/resource_strategy.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 
 
 class ApplyResourceStrategy(OptimizerPass):
-    '''Transposes the weights to use the dense_resource matrix multiply routine'''
+    """Transposes the weights to use the dense_resource matrix multiply routine"""
 
     def match(self, node):
         node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU))

hls4ml/backends/fpga/fpga_backend.py

Lines changed: 29 additions & 29 deletions
@@ -346,9 +346,9 @@ def convert_precision_string(cls, precision):
 
     @classmethod
     def _convert_ap_type(cls, precision):
-        '''
+        """
         Convert a precision string (e.g. "ap_fixed<16,6>") to the internal FixedPrecisionTypes etc.
-        '''
+        """
         bits = re.search('.+<(.+?)>', precision).group(1).split(',')
         sat_mode = None
         round_mode = None
@@ -357,12 +357,12 @@ def _convert_ap_type(cls, precision):
             width = int(bits[0])
             integer = int(bits[1])
             fields = 2
-            signed = not ('u' in precision)
+            signed = 'u' not in precision
         elif 'int' in precision:
             width = int(bits[0])
             integer = width
             fields = 1
-            signed = not ('u' in precision)
+            signed = 'u' not in precision
         if len(bits) > fields:
             round_mode = bits[fields]
         if len(bits) > fields + 1:
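The two rewrites above are ruff's E713 fix: not (x in y) becomes the equivalent, more idiomatic x not in y. A minimal standalone sketch of the parsing these lines perform, with the regex copied from _convert_ap_type and the class scaffolding omitted (the input value is a made-up example):

import re

precision = 'ap_ufixed<16,6,AP_RND,AP_SAT>'  # hypothetical input string

# Pull '16,6,AP_RND,AP_SAT' out of the angle brackets, as in the hunk above.
bits = re.search('.+<(.+?)>', precision).group(1).split(',')
width, integer = int(bits[0]), int(bits[1])
signed = 'u' not in precision  # same truth table as not ('u' in precision)
round_mode = bits[2] if len(bits) > 2 else None
sat_mode = bits[3] if len(bits) > 3 else None

print(width, integer, signed, round_mode, sat_mode)  # 16 6 False AP_RND AP_SAT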
@@ -376,9 +376,9 @@ def _convert_ap_type(cls, precision):
 
     @classmethod
     def _convert_ac_type(cls, precision):
-        '''
+        """
         Convert a precision string (e.g. "ac_fixed<16,6>") to the internal FixedPrecisionTypes etc.
-        '''
+        """
         bits = re.search('.+<(.+?)>', precision).group(1).split(',')
         signed = True  # default is signed
         sat_mode = None
@@ -414,18 +414,18 @@ def _convert_ac_type(cls, precision):
 
     @classmethod
     def _convert_auto_type(cls, precision):
-        '''
+        """
         Convert an "auto" precision string into the UnspecifiedPrecisionType
-        '''
+        """
         return UnspecifiedPrecisionType()
 
     def product_type(self, data_T, weight_T):
-        '''
+        """
         Helper function to determine which product implementation to use during inference
-        '''
-        assert not isinstance(
-            data_T, ExponentPrecisionType
-        ), "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data."
+        """
+        assert not isinstance(data_T, ExponentPrecisionType), (
+            "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data."
+        )
         product = 'mult'
         if isinstance(weight_T, ExponentPrecisionType):
             product = 'weight_exponential'
@@ -754,14 +754,14 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke
         im2col_matrix = self._compute_conv1d_im2col((in_W, in_C), kernel, stride, (pad_left, pad_right), dilation)
 
         generated_code = (
-            "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
-            " public:\n"
-            " static void fill_buffer(\n"
-            " data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n"
-            " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n"
-            " const unsigned partition\n"
-            " ) {{\n"
+            'template<class data_T, typename CONFIG_T>\n'
+            'class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n'
+            ' public:\n'
+            ' static void fill_buffer(\n'
+            ' data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n'
+            ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n'
+            ' const unsigned partition\n'
+            ' ) {{\n'
         ).format(index=layer_idx)
         indent = '    '

@@ -884,14 +884,14 @@ def generate_conv2d_line_buffer_fn(
         )
 
         generated_code = (
-            "template<class data_T, typename CONFIG_T>\n"
-            "class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n"
-            " public:\n"
-            " static void fill_buffer(\n"
-            " data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n"
-            " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n"
-            " const unsigned partition\n"
-            " ) {{\n"
+            'template<class data_T, typename CONFIG_T>\n'
+            'class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n'
+            ' public:\n'
+            ' static void fill_buffer(\n'
+            ' data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n'
+            ' data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n'
+            ' const unsigned partition\n'
+            ' ) {{\n'
         ).format(index=layer_idx)
         indent = '    '
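In the two generated_code hunks above only the quoting changes; the doubled braces are untouched. They escape literal {/} for str.format, which substitutes {index} and nothing else, so the emitted C++ keeps its braces. A minimal sketch using a shortened template (just the first two lines of the real one, with an arbitrary index):

# str.format replaces {index} but turns {{ into a literal { in the output.
generated_code = (
    'template<class data_T, typename CONFIG_T>\n'
    'class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n'
).format(index=3)

print(generated_code)
# template<class data_T, typename CONFIG_T>
# class fill_buffer_3 : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {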

0 commit comments
