From 09564c386b52655d73515e5410ffc59ce16fca54 Mon Sep 17 00:00:00 2001
From: Jovan Mitrevski
Date: Thu, 8 Feb 2024 14:57:29 -0600
Subject: [PATCH] remove double transpose

---
 hls4ml/model/layers.py | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/hls4ml/model/layers.py b/hls4ml/model/layers.py
index b74918f642..fbb7330d37 100644
--- a/hls4ml/model/layers.py
+++ b/hls4ml/model/layers.py
@@ -557,16 +557,8 @@ def _get_folded_weights(self):
     def initialize(self):
         super().initialize()
         folded_weights, folded_bias = self._get_folded_weights()
-        if self.model.config.is_resource_strategy(self) and self.model.config.backend.name in [
-            'Vivado',
-            'VivadoAccelerator',
-        ]:
-            self.weights['weight'].data_unquantized = np.transpose(folded_weights, axes=[3, 0, 1, 2])
-            self.weights['weight'].data = self.get_attr('weight_quantizer')(self.weights['weight'].data_unquantized)
-
-        else:
-            self.weights['weight'].data_unquantized = folded_weights
-            self.weights['weight'].data = self.get_attr('weight_quantizer')(folded_weights)
+        self.weights['weight'].data_unquantized = folded_weights
+        self.weights['weight'].data = self.get_attr('weight_quantizer')(folded_weights)
         self.weights['bias'].data_unquantized = folded_bias
         bias_q = self.get_attr('bias_quantizer')
         if bias_q is not None:
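
Note on the fix: the removed branch applied np.transpose(folded_weights, axes=[3, 0, 1, 2])
for the resource strategy on the Vivado/VivadoAccelerator backends. Per the subject line,
the same permutation was evidently being applied a second time elsewhere (presumably in a
backend optimizer pass; that code is not shown in this patch), and [3, 0, 1, 2] is not its
own inverse, so doing it twice leaves the weights in the wrong layout. A minimal sketch of
the effect, using an illustrative (H, W, C_in, C_out) shape that is not taken from the patch:

    import numpy as np

    # Illustrative kernel shape only; the real folded weights come from
    # _get_folded_weights() in hls4ml/model/layers.py.
    folded_weights = np.zeros((3, 3, 16, 32))  # (H, W, C_in, C_out)

    once = np.transpose(folded_weights, axes=[3, 0, 1, 2])
    print(once.shape)   # (32, 3, 3, 16) -- the layout a single transpose produces

    twice = np.transpose(once, axes=[3, 0, 1, 2])
    print(twice.shape)  # (16, 32, 3, 3) -- transposing twice scrambles the layout

With the branch removed, initialize() stores the folded weights untransposed and leaves
any required reordering to a single place downstream.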