Skip to content

Commit fa5f45e

Browse files
authored
Avoid assertion error when there's a bias
Differential Revision: D70604089 Pull Request resolved: #1839
1 parent ada4c02 commit fa5f45e

File tree

1 file changed

+2
-4
lines changed

1 file changed

+2
-4
lines changed

torchao/quantization/GPTQ.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -741,8 +741,7 @@ def _create_quantized_state_dict(
741741
) -> Dict[str, torch.Tensor]:
742742
cur_state_dict = model.state_dict()
743743
for fqn, mod in model.named_modules():
744-
if isinstance(mod, torch.nn.Linear):
745-
assert not mod.bias
744+
if isinstance(mod, torch.nn.Linear) and mod.bias is None:
746745
out_features = mod.out_features
747746
in_features = mod.in_features
748747
# assert out_features % 8 == 0, "require out_features % 8 == 0"
@@ -1131,8 +1130,7 @@ def _create_quantized_state_dict(
11311130
) -> Dict[str, torch.Tensor]:
11321131
cur_state_dict = model.state_dict()
11331132
for fqn, mod in model.named_modules():
1134-
if isinstance(mod, torch.nn.Linear):
1135-
assert not mod.bias
1133+
if isinstance(mod, torch.nn.Linear) and mod.bias is None:
11361134
out_features = mod.out_features
11371135
in_features = mod.in_features
11381136
# assert out_features % 8 == 0, "require out_features % 8 == 0"

0 commit comments

Comments
 (0)