
Commit a70fc5d

aorenste authored and facebook-github-bot committed
pyre-fixmes for D75477355 - FBGEMM (#4390)
Summary:
Pull Request resolved: #4390
X-link: facebookresearch/FBGEMM#1461

Additional pyre-fixmes needed by pytorch change D75477355 that weren't caught by diff-time CI.

Reviewed By: bobrenjc93

Differential Revision: D77120313

fbshipit-source-id: 24f61082ce573c9db1973782efe2e4f94cfd44bf
1 parent cc5fd8c commit a70fc5d
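
For background: every change in this commit is a `# pyre-fixme[<code>]` suppression comment. Pyre reads the comment on the line(s) immediately above a flagged expression, silences only the error with that code at that location, and leaves a grep-able TODO marker; runtime behavior is unchanged. A minimal, self-contained sketch of the pattern (the function and values below are made up for illustration and are not FBGEMM code):

def count_chars(text: str) -> int:
    return len(text)

# len() happily accepts a list, so this runs, but the annotation says `str`,
# so Pyre reports error code 6 ("incompatible parameter type") on the call.
# The fixme on the line directly above suppresses exactly that one error.
# pyre-fixme[6]: For 1st argument expected `str` but got `List[int]`.
print(count_chars([1, 2, 3]))  # prints 3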

5 files changed: 16 additions & 2 deletions


fbgemm_gpu/fbgemm_gpu/uvm.py

Lines changed: 2 additions & 0 deletions
@@ -21,6 +21,8 @@
 torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")

 # Import all uvm enums from c++ library
+# pyre-fixme[6]: For 2nd argument expected `() -> List[Tuple[str, List[Tuple[str,
+# int]]]]` but got `OpOverloadPacket`.
 create_enums(globals(), torch.ops.fbgemm.fbgemm_gpu_uvm_enum_query)

fbgemm_gpu/test/release/stable_release_test.py

Lines changed: 5 additions & 1 deletion
@@ -144,7 +144,11 @@ def _test_stable_schema(self, version: str) -> None:
             op_name = full_op_name.split(".")[3]

             check_schema_compatibility_from_op_name(
-                torch.ops.fbgemm, op_name, ref_schema_str
+                # pyre-fixme[6]: For 1st argument expected `(...) -> Any`
+                # but got `_OpNamespace`.
+                torch.ops.fbgemm,
+                op_name,
+                ref_schema_str,
             )

     def test_backwards_compatibility(self) -> None:

fbgemm_gpu/test/sll/jagged_softmax_test.py

Lines changed: 3 additions & 1 deletion
@@ -66,7 +66,9 @@ def test_triton_jagged_softmax(
             padded_x1 - (1.0 - presences.unsqueeze(2).to(padded_x1.dtype)) * 5e7
         )
         padded_ref = torch.nn.functional.softmax(
-            softmax_input.transpose(1, 2), dim=-1
+            # pyre-fixme[16]: `float` has no attribute `transpose`.
+            softmax_input.transpose(1, 2),
+            dim=-1,
         )  # [B, H, N]
         ref = torch.ops.fbgemm.dense_to_jagged(padded_ref.permute(0, 2, 1), [offsets])[
             0

fbgemm_gpu/test/tbe/eeg/tbe_indices_generator_test.py

Lines changed: 2 additions & 0 deletions
@@ -58,5 +58,7 @@ def test_indices_generation(

         assert indices.shape == (num_indices,)
         assert indices.dtype == torch.int64
+        # pyre-fixme[6]: For 1st argument expected `Tensor` but got `bool`.
         assert not torch.any(indices > max_index)
+        # pyre-fixme[6]: For 1st argument expected `Tensor` but got `bool`.
         assert not torch.any(indices < 0)

fbgemm_gpu/test/tbe/ssd/ssd_utils_test.py

Lines changed: 4 additions & 0 deletions
@@ -121,6 +121,8 @@ def test_masked_index_put(
             num_value_rows=num_indices,
             num_output_rows=num_output_rows,
             dtype=dtype,
+            # pyre-fixme[6]: For 7th argument expected `(Tensor, Tensor, Tensor,
+            # Tensor, bool) -> Tensor` but got `OpOverloadPacket`.
             test_fn=torch.ops.fbgemm.masked_index_put,
             is_index_put=True,
             use_pipeline=use_pipeline,
@@ -154,6 +156,8 @@ def test_masked_index_select(
             num_value_rows=num_value_rows,
             num_output_rows=num_indices,
             dtype=dtype,
+            # pyre-fixme[6]: For 7th argument expected `(Tensor, Tensor, Tensor,
+            # Tensor, bool) -> Tensor` but got `OpOverloadPacket`.
             test_fn=torch.ops.fbgemm.masked_index_select,
             is_index_put=False,
             use_pipeline=use_pipeline,
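
Most of these suppressions (uvm.py and both ssd_utils_test.py hunks) share one root cause: entries under `torch.ops.*` are `OpOverloadPacket` objects, which are callable at runtime but are not statically typed as the `Callable[...]` that the annotated parameter expects, hence Pyre error code 6. Below is a hedged sketch of that situation and of one alternative to suppression, using a stock PyTorch op and made-up names (`BinaryOp` and `apply_binary` are illustrative, not FBGEMM APIs); the commit itself keeps the call sites as-is and adds `pyre-fixme` comments.

from typing import Callable, cast

import torch

BinaryOp = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]


def apply_binary(fn: BinaryOp, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    return fn(a, b)


a, b = torch.ones(2), torch.ones(2)

# What the commit does: pass the OpOverloadPacket directly and suppress the
# "incompatible parameter type" report at the call site.
# pyre-fixme[6]: For 1st argument expected `BinaryOp` but got `OpOverloadPacket`.
print(apply_binary(torch.ops.aten.add, a, b))

# One alternative: cast the packet to the expected callable type once, which
# records the assumed signature instead of silencing the checker.
add_fn = cast(BinaryOp, torch.ops.aten.add)
print(apply_binary(add_fn, a, b))

Either way the runtime call is identical; the fixme approach is lighter-weight and keeps the diff minimal, at the cost of leaving the untyped packet in place.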
