1 parent 9429faa commit ae7a65f
tests/kernels/moe/utils.py
@@ -163,6 +163,7 @@ def make_quantized_test_activations(
     a_scale = torch.stack(a_scale)

     if not per_act_token_quant and block_shape is None:
+        assert a_scale is not None
         a_scale = a_scale.view(E, 1, 1)

     return a, a_q, a_scale
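For context, the added `assert a_scale is not None` is the usual way to narrow an `Optional[torch.Tensor]` so that a static checker such as mypy accepts the subsequent `.view(E, 1, 1)` call. A minimal sketch of the pattern, using a hypothetical function name rather than the vLLM helper itself:

```python
# Sketch of assert-based Optional narrowing (hypothetical helper, not vLLM code).
from typing import Optional

import torch


def reshape_scale(a_scale: Optional[torch.Tensor], E: int) -> torch.Tensor:
    # The assert narrows Optional[Tensor] to Tensor for the type checker,
    # and fails loudly at runtime if the scale was never produced.
    assert a_scale is not None
    return a_scale.view(E, 1, 1)


if __name__ == "__main__":
    print(reshape_scale(torch.ones(4), E=4).shape)  # torch.Size([4, 1, 1])
```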
tests/kernels/utils.py
@@ -1115,6 +1115,8 @@ def torch_experts(
                                                    w2_scale[i], block_shape,
                                                    out.dtype)
         else:
+            assert (a_scale is not None and w1_scale is not None
+                    and w2_scale is not None)
             f32 = torch.float32
             scales = a_scale if a_scale.numel() == 1 else a_scale[mask]
             tmp1 = a[mask].to(f32) * scales
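Likewise, the second hunk asserts that all three scales are present before the float32 dequantization math runs. A minimal sketch of that guarded branch, with a hypothetical helper and assumed shapes:

```python
# Sketch of the guarded per-tensor vs. per-token scaling step
# (hypothetical helper and shapes, not the torch_experts implementation).
from typing import Optional

import torch


def apply_act_scale(a: torch.Tensor,
                    a_scale: Optional[torch.Tensor],
                    mask: torch.Tensor) -> torch.Tensor:
    # The quantized path below requires the activation scale to exist.
    assert a_scale is not None
    f32 = torch.float32
    # A single-element scale applies to the whole tensor; otherwise the
    # per-token scales are selected with the same mask as the activations.
    scales = a_scale if a_scale.numel() == 1 else a_scale[mask]
    return a[mask].to(f32) * scales


if __name__ == "__main__":
    a = torch.randn(8, 16)
    mask = torch.arange(8) % 2 == 0
    print(apply_act_scale(a, torch.tensor(0.5), mask).shape)  # (4, 16)
```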