File tree Expand file tree Collapse file tree 3 files changed +15
-0
lines changed
quantization_2of4_sparse_w4a16 Expand file tree Collapse file tree 3 files changed +15
-0
lines changed Original file line number Diff line number Diff line change
1
+ # NOTE: Fine tuning can require more steps than are shown in the example
2
+ # See the Axolotl integration blog post for best fine tuning practices
3
+ # https://developers.redhat.com/articles/2025/06/17/axolotl-meets-llm-compressor-fast-sparse-open
4
+
1
5
from pathlib import Path
2
6
3
7
import torch
74
78
)
75
79
76
80
# Sparse finetune
81
+ # This step can be supplanted by fine tuning via integrated FT libraries such as Axolotl
77
82
train (
78
83
model = (output_path / "sparsity_stage" ),
79
84
** oneshot_kwargs ,
Original file line number Diff line number Diff line change
1
+ # NOTE: Fine tuning can require more steps than are shown in the example
2
+ # See the Axolotl integration blog post for best fine tuning practices
3
+ # https://developers.redhat.com/articles/2025/06/17/axolotl-meets-llm-compressor-fast-sparse-open
4
+
1
5
from datasets import load_dataset
2
6
from sft_trainer import SFTTrainer
3
7
from transformers import AutoModelForCausalLM , AutoTokenizer
@@ -46,6 +50,7 @@ def formatting_prompts_func(example):
46
50
)
47
51
model_args = ModelArguments (model = model )
48
52
53
+ # This step can be supplanted by fine tuning via integrated FT libraries such as Axolotl
49
54
trainer = SFTTrainer (
50
55
model = model ,
51
56
processing_class = tokenizer ,
Original file line number Diff line number Diff line change
1
+ # NOTE: Fine tuning can require more steps than are shown in the example
2
+ # See the Axolotl integration blog post for best fine tuning practices
3
+ # https://developers.redhat.com/articles/2025/06/17/axolotl-meets-llm-compressor-fast-sparse-open
4
+
1
5
from sft_trainer import SFTTrainer
2
6
from transformers import AutoModelForCausalLM , AutoTokenizer
3
7
60
64
)
61
65
model_args = ModelArguments (model = model , distill_teacher = teacher )
62
66
67
+ # This step can be supplanted by fine tuning via integrated FT libraries such as Axolotl
63
68
trainer = SFTTrainer (
64
69
model = model ,
65
70
teacher = teacher ,
You can’t perform that action at this time.
0 commit comments