7
7
8
8
# Define a simple metric function for testing
9
9
def simple_metric(example, prediction, trace=None):
    """Test-only metric: a prediction is correct iff its output equals the example's.

    Args:
        example: gold example carrying the expected ``output`` attribute.
        prediction: model prediction carrying the produced ``output`` attribute.
        trace: unused; accepted for compatibility with the dspy metric signature.

    Returns:
        The result of comparing the expected output to the predicted output.
    """
    expected = example.output
    actual = prediction.output
    return expected == actual
12
11
13
12
@@ -41,20 +40,15 @@ def test_compile_with_predict_instances_no_explicit_lm():
41
40
from unittest .mock import patch
42
41
43
42
# Create student and teacher modules without explicit LMs in predictors
44
- # This tests the fix: lm = pred.lm or settings.lm
45
43
student = SimpleModule ("input -> output" )
46
44
teacher = SimpleModule ("input -> output" )
47
45
48
- # Set up LM in settings - this will be the fallback
49
46
lm = DummyLM (["Initial thoughts" , "Finish[blue]" ])
50
47
original_lm = dspy .settings .lm
51
48
dspy .settings .configure (lm = lm )
52
49
53
-
54
50
# Verify that the predictor doesn't have an explicit LM
55
51
assert student .predictor .lm is None
56
-
57
- # Initialize BootstrapFinetune - this should work without AttributeError
58
52
bootstrap = BootstrapFinetune (metric = simple_metric )
59
53
60
54
# Mock all the components that would fail without proper setup
@@ -64,14 +58,12 @@ def test_compile_with_predict_instances_no_explicit_lm():
64
58
patch .object (bootstrap , '_prepare_finetune_data' , return_value = ([], 'openai' )), \
65
59
patch .object (bootstrap , 'finetune_lms' ) as mock_finetune_lms :
66
60
67
- # Mock the finetune_lms to return a mapping from training key to LM
68
61
mock_finetune_lms .return_value = {(lm , None ): lm }
69
62
70
63
# This should not raise AttributeError due to the fix
71
64
compiled_student = bootstrap .compile (student , teacher = teacher , trainset = trainset )
72
65
73
66
assert compiled_student is not None , "Failed to compile student"
74
- # Verify that finetune_lms was called
75
67
mock_finetune_lms .assert_called_once ()
76
68
77
69
0 commit comments