
Commit 6a8887f

fix torchao quantized model in fbcode (#2396)
Summary: Without this change, ran into:

> AttributeError: module 'fbgemm_gpu' has no attribute '__version__'

Differential Revision: D76858513
1 parent: 282d04f

File tree: 1 file changed (+2, -1)

torchao/quantization/quant_api.py

Lines changed: 2 additions & 1 deletion
@@ -81,6 +81,7 @@
     is_MI300,
     is_sm_at_least_89,
     is_sm_at_least_90,
+    is_fbcode,
 )

 from .autoquant import AutoQuantizableLinearWeight, autoquant
@@ -2010,7 +2011,7 @@ def _(module: torch.nn.Module, config: FbgemmConfig) -> torch.nn.Module:

     import fbgemm_gpu.experimental.gen_ai  # noqa: F401

-    if fbgemm_gpu.__version__ < "1.2.0":
+    if not is_fbcode() and fbgemm_gpu.__version__ < "1.2.0":
         raise ImportError("Requires fbgemm-gpu-genai >= 1.2.0")

     _SUPPORTED_DTYPES = {
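
For context, a minimal, self-contained sketch of the failure mode and of the short-circuit this commit introduces. This is not the torchao source: fbgemm_gpu is replaced by a stand-in namespace, and check_fbgemm_version is a hypothetical helper used only to illustrate the guarded check.

import types

# Stand-in for an fbcode build of fbgemm_gpu that exposes no __version__
# attribute (the situation that triggered the AttributeError above).
fbgemm_gpu = types.SimpleNamespace()


def check_fbgemm_version(is_fbcode: bool) -> None:
    # Before this commit: the comparison always read fbgemm_gpu.__version__,
    # which raises AttributeError on builds lacking that attribute.
    # After this commit: the is_fbcode() guard short-circuits the check in
    # fbcode, so __version__ is never touched there.
    if not is_fbcode and fbgemm_gpu.__version__ < "1.2.0":
        raise ImportError("Requires fbgemm-gpu-genai >= 1.2.0")


check_fbgemm_version(is_fbcode=True)    # fbcode path: version check skipped
# check_fbgemm_version(is_fbcode=False) # would raise AttributeError with this stand-in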
