Skip to content

Commit 2114ea7

Browse files
authored
Merge pull request #21 from neph1/update-v0.11.0
add wan and cogvideox to model options
2 parents f4cdf9b + f530cbc commit 2114ea7

File tree

3 files changed

+9
-7
lines changed

3 files changed

+9
-7
lines changed

app.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import gradio as gr
44

5+
from tabs.dataset_tab import DatasetTab
56
from tabs.general_tab import GeneralTab
67
from tabs.prepare_tab import PrepareDatasetTab
78
from tabs.tool_tab import ToolTab
@@ -28,16 +29,16 @@ def setup_views(self):
2829
with gr.Tab("General Settings"):
2930
self.tabs['general'] = GeneralTab("General Settings", os.path.join(self.configs_path, "editor.yaml"))
3031
runtime_tab = gr.Tab("Trainer Settings")
31-
tools_tab = gr.Tab("Lora Tools")
3232

3333
prepare_tab = gr.Tab("Prepare dataset (Legacy)")
3434
runtime_tab_legacy = gr.Tab("Legacy Training Settings")
35+
dataset_tab = gr.Tab("Dataset")
3536

3637
with runtime_tab:
3738
self.tabs['runtime'] = TrainingTab("Trainer Settings", os.path.join(self.configs_path, "config_template.yaml"), allow_load=True)
3839

39-
with tools_tab:
40-
self.tabs['tools'] = ToolTab()
40+
with dataset_tab:
41+
self.tabs['dataset'] = DatasetTab()
4142

4243
with prepare_tab:
4344
self.tabs['prepare'] = PrepareDatasetTab("Prepare dataset (Legacy)", os.path.join(self.configs_path, "prepare_template.yaml"), allow_load=True)

config/config_template.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ lr_num_cycles: 1
3030
lr_scheduler: ['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup']
3131
lr_warmup_steps: 400
3232
max_grad_norm: 1.0
33-
model_name: ['ltx_video', 'hunyuan_video']
33+
model_name: ['ltx_video', 'hunyuan_video', 'wan', 'cogvideox']
3434
nccl_timeout: 1800
3535
num_validation_videos: 0
3636
optimizer: adamw
@@ -49,7 +49,7 @@ text_encoder_3_dtype: [bf16, fp16, fp32]
4949
tracker_name: finetrainers
5050
transformer_dtype: [bf16, fp16, fp32]
5151
train_steps: 3000
52-
training_type: lora
52+
training_type: ['lora', 'full-finetune']
5353
use_8bit_bnb: false
5454
vae_dtype: [bf16, fp16, fp32]
5555
validation_epochs: 0

run_trainer.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,8 @@ def run(self, config: Config, finetrainers_path: str, log_file: str):
4848

4949
dataloader_cmd = ["--dataloader_num_workers", config.get('dataloader_num_workers')]
5050

51-
diffusion_cmd = [config.get('diffusion_options')]
51+
# TODO: seems to have changed, need full options
52+
#diffusion_cmd = [config.get('diffusion_options')]
5253

5354
training_cmd = ["--training_type", config.get('training_type'),
5455
"--seed", config.get('seed'),
@@ -91,7 +92,7 @@ def run(self, config: Config, finetrainers_path: str, log_file: str):
9192
"--nccl_timeout", config.get('nccl_timeout'),
9293
"--report_to", config.get('report_to')]
9394
accelerate_cmd = ["accelerate", "launch", "--config_file", f"{finetrainers_path}/accelerate_configs/{config.get('accelerate_config')}", "--gpu_ids", config.get('gpu_ids')]
94-
cmd = accelerate_cmd + [f"{finetrainers_path}/train.py"] + model_cmd + dataset_cmd + dataloader_cmd + diffusion_cmd + training_cmd + optimizer_cmd + validation_cmd + miscellaneous_cmd
95+
cmd = accelerate_cmd + [f"{finetrainers_path}/train.py"] + model_cmd + dataset_cmd + dataloader_cmd + training_cmd + optimizer_cmd + validation_cmd + miscellaneous_cmd
9596
fixed_cmd = []
9697
for i in range(len(cmd)):
9798
if cmd[i] != '':

0 commit comments

Comments (0)