Commit f48676d

update tests
1 parent e798ea8 commit f48676d

File tree

2 files changed: +16 additions, -25 deletions

tests/unitary/with_extras/aqua/test_data/finetuning/ft_config.json

Lines changed: 0 additions & 15 deletions
@@ -1,28 +1,13 @@
 {
     "configuration": {
-        "adapter": "lora",
-        "bf16": true,
-        "flash_attention": true,
-        "fp16": false,
-        "gradient_accumulation_steps": 1,
-        "gradient_checkpointing": true,
         "learning_rate": 0.0002,
-        "logging_steps": 1,
         "lora_alpha": 16,
         "lora_dropout": 0.05,
         "lora_r": 32,
         "lora_target_linear": true,
-        "lora_target_modules": [
-            "q_proj",
-            "k_proj"
-        ],
-        "lr_scheduler": "cosine",
         "micro_batch_size": 1,
-        "optimizer": "adamw_torch",
         "pad_to_sequence_len": true,
-        "sample_packing": true,
         "sequence_len": 2048,
-        "tf32": false,
         "val_set_size": 0.1
     },
     "shape": {

tests/unitary/with_extras/aqua/test_finetuning.py

Lines changed: 16 additions & 10 deletions
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*--
 
-# Copyright (c) 2024 Oracle and/or its affiliates.
+# Copyright (c) 2024, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import os
@@ -11,7 +11,6 @@
 from unittest import TestCase
 from unittest.mock import MagicMock, PropertyMock
 from mock import patch
-from dataclasses import asdict
 from importlib import reload
 
 import ads.aqua
@@ -147,7 +146,7 @@ def test_create_fine_tuning(
 
         aqua_ft_summary = self.app.create(**create_aqua_ft_details)
 
-        assert asdict(aqua_ft_summary) == {
+        assert aqua_ft_summary.to_dict() == {
             "console_url": f"https://cloud.oracle.com/data-science/models/{ft_model.id}?region={self.app.region}",
             "experiment": {
                 "id": f"{mock_mvs_create.return_value[0]}",
@@ -267,15 +266,14 @@ def test_get_finetuning_default_params(self):
         params_dict = {
             "params": {
                 "batch_size": 1,
+                "val_set_size": 0.1,
                 "sequence_len": 2048,
-                "sample_packing": True,
                 "pad_to_sequence_len": True,
                 "learning_rate": 0.0002,
                 "lora_r": 32,
                 "lora_alpha": 16,
                 "lora_dropout": 0.05,
                 "lora_target_linear": True,
-                "lora_target_modules": ["q_proj", "k_proj"],
             }
         }
         config_json = os.path.join(self.curr_dir, "test_data/finetuning/ft_config.json")
@@ -308,16 +306,24 @@ def test_get_finetuning_default_params(self):
                 },
                 True,
             ),
+            (
+                {"optimizer": "adamw_torch"},
+                False,
+            ),
             (
                 {
-                    "micro_batch_size": 1,
-                    "max_sequence_len": 2048,
-                    "flash_attention": True,
-                    "pad_to_sequence_len": True,
-                    "lr_scheduler": "cosine",
+                    "epochs": [2],
                 },
                 False,
             ),
+            (
+                {
+                    "epochs": 2,
+                    "load_best_model_at_end": True,
+                    "metric_for_best_model": "accuracy",
+                },
+                True,
+            ),
         ]
     )
     def test_validate_finetuning_params(self, params, is_valid):
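
Two changes stand out in this file: the summary assertion now calls aqua_ft_summary.to_dict() instead of dataclasses.asdict, and the parameterized cases for test_validate_finetuning_params now reject an "optimizer" override and a list-valued "epochs" while accepting "epochs", "load_best_model_at_end", and "metric_for_best_model". A self-contained sketch of the same (params, is_valid) parameterization pattern, using a hypothetical validate_params stand-in since the real AQUA validator is not shown in this diff:

from unittest import TestCase

from parameterized import parameterized


def validate_params(params: dict) -> bool:
    # Hypothetical stand-in used only for illustration: accept a small
    # allow-list of overridable keys and reject a list-valued "epochs".
    allowed = {"epochs", "load_best_model_at_end", "metric_for_best_model"}
    if not set(params).issubset(allowed):
        return False
    return not isinstance(params.get("epochs"), list)


class TestValidateParams(TestCase):
    @parameterized.expand(
        [
            ({"optimizer": "adamw_torch"}, False),
            ({"epochs": [2]}, False),
            (
                {
                    "epochs": 2,
                    "load_best_model_at_end": True,
                    "metric_for_best_model": "accuracy",
                },
                True,
            ),
        ]
    )
    def test_validate(self, params, is_valid):
        assert validate_params(params) is is_valid

In the actual test the outcome presumably comes from the AQUA fine-tuning app rather than a local helper; the sketch only mirrors the case structure added by this commit.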
