Commit d5b2213
Address pylint warnings
1 parent 2925853
4 files changed: +7 -9 lines

tools/accuracy_checker/accuracy_checker/annotation_converters/sentence_similarity.py
1 addition & 1 deletion

@@ -150,7 +150,7 @@ def convert_single_example(self, example, min_score=None, max_score=None):
             tokens.append("[CLS]" if self.support_vocab else CLS_ID)
             segment_ids.append(SEG_ID_CLS)
         else:
-            tokens = self.tokenizer.tokenize(example.text, add_special_tokens=True)
+            tokens = self.tokenizer.tokenize(example.text)
             segment_ids = [SEG_ID_A] * len(tokens)

         if len(tokens) > self.max_seq_length:

tools/accuracy_checker/accuracy_checker/annotation_converters/text_classification.py
2 additions & 2 deletions

@@ -156,8 +156,8 @@ def convert_single_example(self, example): # pylint:disable=R0912
             segment_ids.append(SEG_ID_CLS)
         else:
             if example.text_b:
-                tokens = self.tokenizer.tokenize((example.text_a, example.text_b), add_special_tokens=True)
-                len_tokens_a = len(self.tokenizer.tokenize(example.text_a, add_special_tokens=True))
+                tokens = self.tokenizer.tokenize((example.text_a, example.text_b))
+                len_tokens_a = len(self.tokenizer.tokenize(example.text_a))
                 segment_ids = [SEG_ID_A] * len_tokens_a + [SEG_ID_B] * (len(tokens) - len_tokens_a)
             else:
                 tokens = self.tokenizer.tokenize(example.text_a)
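
A note on the two tokenizer hunks above (an inference; the commit message says only "Address pylint warnings"): these converters handle special tokens such as "[CLS]" themselves, and passing add_special_tokens=True to a tokenize() that declares no such keyword is the pattern pylint reports as unexpected-keyword-arg (E1123). A minimal sketch with a hypothetical stand-in tokenizer:

    import re

    class WhitespaceTokenizer:
        # Hypothetical stand-in for the tokenizers these converters use;
        # its tokenize() declares no add_special_tokens keyword.
        def tokenize(self, text):
            return re.findall(r"\w+|[^\w\s]", text.lower())

    tokenizer = WhitespaceTokenizer()
    tokens = tokenizer.tokenize("An example sentence.")  # fine
    # tokenizer.tokenize("text", add_special_tokens=True) would raise
    # TypeError at runtime; that is the call pylint flags before it runs.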

tools/accuracy_checker/accuracy_checker/launcher/launcher.py
1 addition & 2 deletions

@@ -263,8 +263,7 @@ def _provide_inputs_info_to_meta(self, meta):

         return meta

-    @staticmethod
-    def fit_to_input(data, layer_name, layout, precision, template=None):
+    def fit_to_input(self, data, layer_name, layout, precision, template=None):
         layout_used = False
         if layout is not None and len(np.shape(data)) == len(layout):
             data = np.transpose(data, layout)
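
A plausible reading of the launcher.py hunk (hedged; the warning is not named in the commit): subclasses override fit_to_input as a bound method taking self, and a @staticmethod base definition with a mismatched first parameter draws pylint complaints such as arguments-differ (W0221). A minimal sketch of the override pattern, with hypothetical class names:

    import numpy as np

    class Launcher:
        # A regular instance method after the change, so overrides that
        # take self keep a signature pylint accepts.
        def fit_to_input(self, data, layer_name, layout, precision, template=None):
            data = np.asarray(data)
            if layout is not None and data.ndim == len(layout):
                data = np.transpose(data, layout)
            return data.astype(precision) if precision else data

    class MyLauncher(Launcher):  # hypothetical subclass
        def fit_to_input(self, data, layer_name, layout, precision, template=None):
            data = super().fit_to_input(data, layer_name, layout, precision, template)
            return np.expand_dims(data, axis=0)  # e.g. prepend a batch axis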

tools/accuracy_checker/accuracy_checker/launcher/pytorch_launcher.py
3 additions & 4 deletions

@@ -156,7 +156,7 @@ def load_module(self, model_cls, module_args, module_kwargs, checkpoint=None, st
         model_cls = module_parts[-1]
         model_path = ".".join(module_parts[:-1])
         with append_to_path(python_path):
-            model_cls = importlib.import_module(model_path).__getattribute__(model_cls)
+            model_cls = getattr(importlib.import_module(model_path), model_cls)
             module = model_cls(*module_args, **module_kwargs)
         if init_method is not None:
             if hasattr(model_cls, init_method):

@@ -247,16 +247,15 @@ def _convert_to_numpy(self, input_dict):
     def forward(self, outputs):
         if hasattr(outputs, 'logits') and 'logits' in self.output_names:
             return {'logits': outputs.logits}
-        elif hasattr(outputs, 'last_hidden_state') and 'last_hidden_state' in self.output_names:
+        if hasattr(outputs, 'last_hidden_state') and 'last_hidden_state' in self.output_names:
             return {'last_hidden_state': outputs.last_hidden_state}
         return list(outputs)

     def predict(self, inputs, metadata=None, **kwargs):
         results = []
         with self._torch.no_grad():
             for batch_input in inputs:
-                if metadata[0].get('input_is_dict_type') or \
-                        (isinstance(batch_input, dict) and 'input' in batch_input):
+                if metadata[0].get('input_is_dict_type') or (isinstance(batch_input, dict) and 'input' in batch_input):
                     outputs = self.module(batch_input['input'])
                 else:
                     outputs = self.module(**batch_input)
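
The pytorch_launcher.py hunks look like three routine pylint cleanups (again an inference): getattr() in place of an explicit __getattribute__ dunder call (unnecessary-dunder-call, C2801), plain if instead of elif after a return (no-else-return, R1705), and a folded backslash continuation. A self-contained sketch of the getattr-based dynamic import, using a standard-library class as the hypothetical target:

    import importlib

    def load_class(dotted_path):
        """Resolve 'package.module.ClassName' to the class object."""
        module_path, _, cls_name = dotted_path.rpartition(".")
        module = importlib.import_module(module_path)
        # getattr() is the idiomatic lookup; module.__getattribute__(cls_name)
        # behaves identically here but trips pylint's unnecessary-dunder-call.
        return getattr(module, cls_name)

    counter_cls = load_class("collections.Counter")  # hypothetical usage
    print(counter_cls("abracadabra").most_common(2))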
