Description
GPTQ weight-only quantization of a Qwen3 model with Intel Neural Compressor fails during calibration: when the GPTQ loop replays a transformer block, Qwen3Attention.forward receives position_embeddings=None and raises TypeError: cannot unpack non-iterable NoneType object.
Traceback (most recent call last):
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/quantization.py", line 218, in fit
strategy.traverse()
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/strategy/auto.py", line 140, in traverse
super().traverse()
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/strategy/strategy.py", line 503, in traverse
q_model = self.adaptor.quantize(copy.deepcopy(tune_cfg), self.model, self.calib_dataloader, self.q_func)
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/utils/utility.py", line 426, in fi
res = func(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/adaptor/pytorch.py", line 4565, in quantize
q_model._model, gptq_config = self.gptq_quantize(q_model._model, tune_cfg, dataloader)
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/adaptor/pytorch.py", line 4719, in gptq_quantize
model, quantization_perm = gptq_quantize(
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/adaptor/torch_utils/weight_only.py", line 518, in gptq_quantize
fp32_modified_model, gptq_config = gptq_quantizer.execute_quantization(model_path=model_path)
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/neural_compressor/adaptor/torch_utils/gptq.py", line 681, in execute_quantization
out = transformer_block(*cache_positional_batch, **cache_keyword_batch)
File "/usr/local/lib/python3.10/dist-packages/transformers/modeling_layers.py", line 93, in call
return super().call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/transformers/models/qwen3/modeling_qwen3.py", line 257, in forward
hidden_states, _ = self.self_attn(
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1562, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/transformers/models/qwen3/modeling_qwen3.py", line 202, in forward
cos, sin = position_embeddings
TypeError: cannot unpack non-iterable NoneType object
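
For reference, here is a minimal sketch of the kind of call that produces this traceback. The model id, calibration data, and exact weight-only recipe are assumptions on my side, not necessarily the original setup:

```python
# Minimal sketch (assumed model id, calibration data, and GPTQ recipe);
# exercises the call path quantization.fit -> strategy.traverse -> gptq_quantize
# shown in the traceback above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from neural_compressor import PostTrainingQuantConfig, quantization

model_name = "Qwen/Qwen3-8B"  # assumed Qwen3 checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Tiny calibration dataloader; GPTQ replays each transformer block with the
# inputs it captures from these samples.
class CalibDataset(torch.utils.data.Dataset):
    def __init__(self):
        texts = ["Intel Neural Compressor GPTQ calibration sample."] * 8
        self.samples = [tokenizer(t, return_tensors="pt").input_ids.squeeze(0) for t in texts]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]

dataloader = torch.utils.data.DataLoader(CalibDataset(), batch_size=1)

# Illustrative 4-bit weight-only GPTQ recipe.
conf = PostTrainingQuantConfig(
    approach="weight_only",
    op_type_dict={
        ".*": {
            "weight": {
                "bits": 4,
                "group_size": 128,
                "scheme": "sym",
                "algorithm": "GPTQ",
            },
        },
    },
)

# Fails inside Qwen3Attention.forward with
# "TypeError: cannot unpack non-iterable NoneType object".
q_model = quantization.fit(model, conf, calib_dataloader=dataloader)
```

From the traceback, the GPTQ loop calls the transformer block directly with cached positional and keyword arguments (gptq.py line 681), while recent transformers releases compute position_embeddings (cos, sin) in the model-level forward and pass them down to each decoder layer; that argument appears not to be captured, so Qwen3Attention.forward receives None.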