Skip to content

Commit 344e7ae

Browse files
author
Vincent Moens
committed
Update
[ghstack-poisoned]
1 parent f70bc1b commit 344e7ae

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

torchrl/envs/transforms/transforms.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4943,15 +4943,15 @@ def call_tokenizer_fn(self, value: str | List[str]):
49434943
if isinstance(value, str):
49444944
out = self.tokenizer.encode(value, return_tensors="pt", **kwargs)[0]
49454945
# TODO: incorporate attention mask
4946-
attention_mask = torch.ones_like(out, dtype=torch.bool)
4946+
# attention_mask = torch.ones_like(out, dtype=torch.bool)
49474947
else:
49484948
kwargs["padding"] = (
49494949
self.padding if self.max_length is None else "max_length"
49504950
)
49514951
# kwargs["return_attention_mask"] = False
49524952
# kwargs["return_token_type_ids"] = False
49534953
out = self.tokenizer.batch_encode_plus(value, return_tensors="pt", **kwargs)
4954-
attention_mask = out["attention_mask"]
4954+
# attention_mask = out["attention_mask"]
49554955
out = out["input_ids"]
49564956

49574957
if device is not None and out.device != device:

0 commit comments

Comments (0)