|
| 1 | +import dspy |
| 2 | +from dspy import Example |
| 3 | +from dspy.predict import Predict |
| 4 | +from dspy.teleprompt import BootstrapFinetune |
| 5 | +from dspy.utils.dummies import DummyLM |
| 6 | + |
| 7 | + |
| 8 | +# Define a simple metric function for testing |
def simple_metric(example, prediction, trace=None):
    """Toy exact-match metric: True iff the predicted output equals the expected one.

    The ``trace`` parameter is part of the dspy metric calling convention and is
    intentionally ignored here.
    """
    return prediction.output == example.output
| 12 | + |
| 13 | + |
# Shared fixtures: the first example declares its input field, the second does not.
examples = [
    Example(input="What is the color of the sky?", output="blue").with_inputs("input"),
    Example(input="What does the fox say?", output="Ring-ding-ding-ding-dingeringeding!"),
]
# Train only on the input-annotated example.
trainset = examples[:1]
| 19 | + |
| 20 | + |
def test_bootstrap_finetune_initialization():
    """BootstrapFinetune stores the given metric and defaults ``multitask`` to True."""
    # Initialize BootstrapFinetune with a dummy metric and minimal setup.
    bootstrap = BootstrapFinetune(metric=simple_metric)
    # Identity checks: the metric is the exact function object we passed in,
    # and the flag is the literal True (avoids the E712 `== True` comparison).
    assert bootstrap.metric is simple_metric, "Metric not correctly initialized"
    assert bootstrap.multitask is True, "Multitask should default to True"
| 26 | + |
| 27 | + |
class SimpleModule(dspy.Module):
    """Minimal dspy module that wraps a single Predict over the given signature.

    If ``lm`` is provided (truthy), it is attached to the predictor; otherwise the
    predictor has no explicit LM and falls back to whatever dspy.settings supplies.
    """

    def __init__(self, signature, lm=None):
        super().__init__()
        predictor = Predict(signature)
        if lm:
            predictor.lm = lm
        self.predictor = predictor

    def forward(self, **kwargs):
        # Pure delegation: the module's output is the predictor's output.
        return self.predictor(**kwargs)
| 37 | + |
| 38 | + |
def test_compile_with_predict_instances_no_explicit_lm():
    """Test BootstrapFinetune compile with predictors that don't have explicit LMs.

    Exercises the fallback path ``lm = pred.lm or settings.lm``: the predictors
    are built without an explicit LM, so compile must pick up the LM configured
    in dspy.settings rather than raising AttributeError.
    """
    from unittest.mock import patch

    # Create student and teacher modules without explicit LMs in predictors.
    # This tests the fix: lm = pred.lm or settings.lm
    student = SimpleModule("input -> output")
    teacher = SimpleModule("input -> output")

    # Set up LM in settings - this will be the fallback. Remember the previous
    # LM so global settings can be restored afterwards; without the restore this
    # test leaked the DummyLM into every test that runs after it.
    lm = DummyLM(["Initial thoughts", "Finish[blue]"])
    original_lm = dspy.settings.lm
    dspy.settings.configure(lm=lm)
    try:
        # Verify that the predictor doesn't have an explicit LM.
        assert student.predictor.lm is None

        # Initialize BootstrapFinetune - this should work without AttributeError.
        bootstrap = BootstrapFinetune(metric=simple_metric)

        # Mock all the components that would fail without proper setup.
        with patch('dspy.teleprompt.bootstrap_finetune.all_predictors_have_lms'), \
             patch('dspy.teleprompt.bootstrap_finetune.prepare_teacher', return_value=teacher), \
             patch('dspy.teleprompt.bootstrap_finetune.bootstrap_trace_data', return_value=[]), \
             patch.object(bootstrap, '_prepare_finetune_data', return_value=([], 'openai')), \
             patch.object(bootstrap, 'finetune_lms') as mock_finetune_lms:

            # Mock the finetune_lms to return a mapping from training key to LM.
            mock_finetune_lms.return_value = {(lm, None): lm}

            # This should not raise AttributeError due to the fix.
            compiled_student = bootstrap.compile(student, teacher=teacher, trainset=trainset)

            assert compiled_student is not None, "Failed to compile student"
            # Verify that finetune_lms was called.
            mock_finetune_lms.assert_called_once()
    finally:
        # Restore whatever LM was configured before this test ran so that
        # global dspy state does not bleed into other tests.
        dspy.settings.configure(lm=original_lm)
| 76 | + |
| 77 | + |
# (stray GitHub page residue — not part of the source file)