
Commit 395eedb

[Examples] Add SFT disclaimers (#1640)
## Purpose ##

* Add disclaimers noting that SFT is more effective when done in other training libraries
* Related to #1638

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

1 parent bbdd770 · commit 395eedb

3 files changed: +15 −0 lines changed

examples/quantization_2of4_sparse_w4a16/llama7b_sparse_w4a16.py

Lines changed: 5 additions & 0 deletions
@@ -1,3 +1,7 @@
+# NOTE: Fine tuning can require more steps than is shown in the example
+# See the Axolotl integration blog post for best fine tuning practices
+# https://developers.redhat.com/articles/2025/06/17/axolotl-meets-llm-compressor-fast-sparse-open
+
 from pathlib import Path
 
 import torch
@@ -74,6 +78,7 @@
 )
 
 # Sparse finetune
+# This step can be supplanted by fine tuning via integrated FT libraries such as Axolotl
 train(
     model=(output_path / "sparsity_stage"),
     **oneshot_kwargs,
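
For context, `train` here is llm-compressor's built-in fine-tuning entry point, and the new comment marks the step the Axolotl integration can replace. Below is a minimal sketch of the call in isolation; the `output_path`, dataset, and recipe values are illustrative stand-ins, since the real example assembles `oneshot_kwargs` from its own configuration earlier in the script:

from pathlib import Path

from llmcompressor import train  # fine-tuning entry point used by the example

# Illustrative values only; the example builds these earlier in the script
output_path = Path("./output_llama7b_2of4_w4a16")
oneshot_kwargs = dict(
    dataset="open_platypus",          # hypothetical dataset name
    recipe="2of4_w4a16_recipe.yaml",  # hypothetical recipe path
)

# Sparse finetune; can be supplanted by fine tuning via integrated
# FT libraries such as Axolotl (see the blog post linked above)
train(
    model=(output_path / "sparsity_stage"),
    **oneshot_kwargs,
    output_dir=(output_path / "finetuning_stage"),
)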

examples/trl_mixin/ex_trl_constant.py

Lines changed: 5 additions & 0 deletions
@@ -1,3 +1,7 @@
+# NOTE: Fine tuning can require more steps than is shown in the example
+# See the Axolotl integration blog post for best fine tuning practices
+# https://developers.redhat.com/articles/2025/06/17/axolotl-meets-llm-compressor-fast-sparse-open
+
 from datasets import load_dataset
 from sft_trainer import SFTTrainer
 from transformers import AutoModelForCausalLM, AutoTokenizer
@@ -46,6 +50,7 @@ def formatting_prompts_func(example):
 )
 model_args = ModelArguments(model=model)
 
+# This step can be supplanted by fine tuning via integrated FT libraries such as Axolotl
 trainer = SFTTrainer(
     model=model,
     processing_class=tokenizer,
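
The `SFTTrainer` imported from `sft_trainer` is the example's local mixin class, which wires llm-compressor's compression session into TRL's trainer so a recipe (e.g. constant pruning) is applied while SFT runs. A hedged sketch of how the instantiation typically completes and runs follows; the model name, recipe path, and import paths beyond what the diff shows are assumptions based on TRL's `SFTTrainer` API, not verbatim from the file:

from datasets import load_dataset
from sft_trainer import SFTTrainer  # the example's local TRL mixin
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import SFTConfig

from llmcompressor.args import ModelArguments  # assumed import path

# Illustrative setup; the real example uses its own model, recipe, and dataset
model = AutoModelForCausalLM.from_pretrained("your-sparse-model")  # hypothetical
tokenizer = AutoTokenizer.from_pretrained("your-sparse-model")
dataset = load_dataset("gsm8k", "main", split="train")
recipe = "recipe.yaml"  # hypothetical llm-compressor recipe path

trainer = SFTTrainer(
    model=model,
    processing_class=tokenizer,
    recipe=recipe,                          # applied by the mixin during training
    train_dataset=dataset,
    args=SFTConfig(output_dir="./output"),  # TRL training configuration
    model_args=ModelArguments(model=model),
)
trainer.train()  # fine tuning may need more steps than the example runs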

examples/trl_mixin/ex_trl_distillation.py

Lines changed: 5 additions & 0 deletions
@@ -1,3 +1,7 @@
+# NOTE: Fine tuning can require more steps than is shown in the example
+# See the Axolotl integration blog post for best fine tuning practices
+# https://developers.redhat.com/articles/2025/06/17/axolotl-meets-llm-compressor-fast-sparse-open
+
 from sft_trainer import SFTTrainer
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
@@ -60,6 +64,7 @@
 )
 model_args = ModelArguments(model=model, distill_teacher=teacher)
 
+# This step can be supplanted by fine tuning via integrated FT libraries such as Axolotl
 trainer = SFTTrainer(
     model=model,
     teacher=teacher,
