We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 7531603 commit ab9dee0
src/llmcompressor/modifiers/pruning/wanda/base.py
@@ -36,9 +36,6 @@ class WandaPruningModifier(SparsityModifierBase):
36
Lifecycle:
37
- on_initialize
38
- register_hook(module, calibrate_module, "forward")
39
- - run_sequential / run_layer_sequential / run_basic
40
- - make_empty_row_scalars
41
- - accumulate_row_scalars
42
- on_sequential_batch_end
43
- sparsify_weight
44
- on_finalize
src/llmcompressor/modifiers/quantization/gptq/base.py
@@ -61,7 +61,7 @@ class GPTQModifier(Modifier, QuantizationMixin):
61
62
63
64
- - apply config to model
+ - apply quantization config to model
65
- on_start
66
- add activation calibration hooks
67
- add gptq weight calibration hooks
0 commit comments