@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright (c) 2024 Oracle and/or its affiliates.
+# Copyright (c) 2024, 2025 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 import os
@@ -11,7 +11,6 @@
 from unittest import TestCase
 from unittest.mock import MagicMock, PropertyMock
 from mock import patch
-from dataclasses import asdict
 from importlib import reload
 
 import ads.aqua
@@ -147,7 +146,7 @@ def test_create_fine_tuning(
 
         aqua_ft_summary = self.app.create(**create_aqua_ft_details)
 
-        assert asdict(aqua_ft_summary) == {
+        assert aqua_ft_summary.to_dict() == {
            "console_url": f"https://cloud.oracle.com/data-science/models/{ft_model.id}?region={self.app.region}",
            "experiment": {
                "id": f"{mock_mvs_create.return_value[0]}",
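
Note on the hunk above: dataclasses.asdict emits every declared field of a dataclass verbatim, while a to_dict() method lets the summary class own its serialization, so the test now exercises the same path API callers see. A minimal sketch of the distinction, using illustrative names rather than the actual ADS classes:

    from dataclasses import dataclass, asdict

    @dataclass
    class FineTuningSummary:  # hypothetical stand-in for the real summary model
        id: str
        lifecycle_state: str = "ACCEPTED"

        def to_dict(self) -> dict:
            # A hand-written serializer can rename, drop, or normalize fields;
            # asdict() always emits every dataclass field as-is.
            return {"id": self.id, "lifecycle_state": self.lifecycle_state.lower()}

    summary = FineTuningSummary(id="ocid1.datasciencemodel.oc1..xxx")
    assert asdict(summary)["lifecycle_state"] == "ACCEPTED"
    assert summary.to_dict()["lifecycle_state"] == "accepted"
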
@@ -267,15 +266,14 @@ def test_get_finetuning_default_params(self):
        params_dict = {
            "params": {
                "batch_size": 1,
+                "val_set_size": 0.1,
                "sequence_len": 2048,
-                "sample_packing": True,
                "pad_to_sequence_len": True,
                "learning_rate": 0.0002,
                "lora_r": 32,
                "lora_alpha": 16,
                "lora_dropout": 0.05,
                "lora_target_linear": True,
-                "lora_target_modules": ["q_proj", "k_proj"],
            }
        }
        config_json = os.path.join(self.curr_dir, "test_data/finetuning/ft_config.json")
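
The expected defaults above track the fixture at test_data/finetuning/ft_config.json: val_set_size is now expected, and sample_packing and lora_target_modules are not. A rough sketch of reading defaults out of such a JSON fixture (the file layout and function name here are assumptions for illustration, not the ADS API):

    import json

    def load_default_params(config_path: str) -> dict:
        """Return the {"params": {...}} block from a fine-tuning config file."""
        with open(config_path, encoding="utf-8") as f:
            config = json.load(f)
        # Assumed layout: the tunable hyperparameters live under a "params" key.
        return {"params": config.get("params", {})}

    defaults = load_default_params("test_data/finetuning/ft_config.json")
    assert defaults["params"].get("val_set_size") == 0.1
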
@@ -308,16 +306,24 @@ def test_get_finetuning_default_params(self):
                },
                True,
            ),
+            (
+                {"optimizer": "adamw_torch"},
+                False,
+            ),
            (
                {
-                    "micro_batch_size": 1,
-                    "max_sequence_len": 2048,
-                    "flash_attention": True,
-                    "pad_to_sequence_len": True,
-                    "lr_scheduler": "cosine",
+                    "epochs": [2],
                },
                False,
            ),
+            (
+                {
+                    "epochs": 2,
+                    "load_best_model_at_end": True,
+                    "metric_for_best_model": "accuracy",
+                },
+                True,
+            ),
        ]
    )
    def test_validate_finetuning_params(self, params, is_valid):
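
The three new cases encode the validator's contract: unsupported keys such as optimizer are rejected, epochs must be a scalar rather than a list, and the early-stopping trio (epochs, load_best_model_at_end, metric_for_best_model) is accepted together. A toy validator with the same observable behavior (purely illustrative; the real checks live in the Aqua fine-tuning app, not in this test file):

    # Allowed keys and the type each value must have (illustrative subset).
    ALLOWED_PARAMS = {
        "epochs": int,
        "learning_rate": float,
        "load_best_model_at_end": bool,
        "metric_for_best_model": str,
    }

    def validate_finetuning_params(params: dict) -> bool:
        """Return True only if every key is recognized and correctly typed."""
        return all(
            key in ALLOWED_PARAMS and isinstance(value, ALLOWED_PARAMS[key])
            for key, value in params.items()
        )

    assert not validate_finetuning_params({"optimizer": "adamw_torch"})  # unknown key
    assert not validate_finetuning_params({"epochs": [2]})               # wrong type
    assert validate_finetuning_params(
        {"epochs": 2, "load_best_model_at_end": True, "metric_for_best_model": "accuracy"}
    )
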
|