@@ -387,7 +387,7 @@ async def gpt_4o_mini_stream_2():
387
387
async def completion_side_effect (* args , ** kwargs ):
388
388
return stream_generators .pop (0 )() # return new async generator instance
389
389
390
- with mock .patch ("litellm.acompletion" , side_effect = completion_side_effect ) as mock_completion :
390
+ with mock .patch ("litellm.acompletion" , side_effect = completion_side_effect ):
391
391
program = dspy .streamify (
392
392
MyProgram (),
393
393
stream_listeners = [
@@ -483,7 +483,7 @@ async def gpt_4o_mini_stream_2(*args, **kwargs):
483
483
484
484
with mock .patch (
485
485
"litellm.acompletion" , new_callable = AsyncMock , side_effect = [gpt_4o_mini_stream_1 (), gpt_4o_mini_stream_2 ()]
486
- ) as mock_completion :
486
+ ):
487
487
program = dspy .streamify (
488
488
MyProgram (),
489
489
stream_listeners = [
@@ -762,3 +762,78 @@ async def completion_side_effect(*args, **kwargs):
762
762
concat_message = "" .join ([chunk .chunk for chunk in all_chunks ])
763
763
# The listener functions twice.
764
764
assert concat_message == "To get to the other side!To get to the other side!"
765
+
766
@pytest.mark.anyio
async def test_stream_listener_returns_correct_chunk_xml_adapter():
    """Verify stream listeners assemble complete field values under XMLAdapter.

    Two chained predictors stream XML-tagged output (`<answer>...</answer>`,
    `<judgement>...</judgement>`); each listener must surface exactly the text
    between its field's tags, attributed to the correct predictor.
    """

    class MyProgram(dspy.Module):
        def __init__(self):
            super().__init__()
            self.predict1 = dspy.Predict("question->answer")
            self.predict2 = dspy.Predict("question,answer->judgement")

        def forward(self, question, **kwargs):
            answer = self.predict1(question=question, **kwargs).answer
            judgement = self.predict2(question=question, answer=answer, **kwargs)
            return judgement

    def _as_stream_chunks(tokens):
        # Wrap each raw token in the litellm streaming-response envelope.
        for token in tokens:
            yield ModelResponseStream(
                model="gpt-4o-mini",
                choices=[StreamingChoices(delta=Delta(content=token))],
            )

    async def xml_stream_1(*args, **kwargs):
        # Token-by-token XML stream for the first predictor's answer field.
        tokens = [
            "<", "answer", ">",
            "To", " get", " to", " the", " other", " side", "!",
            "<", "/answer", ">",
            "<", "completed", ">",
        ]
        for chunk in _as_stream_chunks(tokens):
            yield chunk

    async def xml_stream_2(*args, **kwargs):
        # Token-by-token XML stream for the second predictor's judgement field.
        tokens = [
            "<", "judgement", ">",
            "The", " answer", " is", " humorous", ".",
            "<", "/judgement", ">",
            "<", "completed", ">",
        ]
        for chunk in _as_stream_chunks(tokens):
            yield chunk

    stream_generators = [xml_stream_1, xml_stream_2]

    async def completion_side_effect(*args, **kwargs):
        # Each mocked LM call hands back a fresh async generator instance.
        return stream_generators.pop(0)()

    with mock.patch("litellm.acompletion", side_effect=completion_side_effect):
        program = dspy.streamify(
            MyProgram(),
            stream_listeners=[
                dspy.streaming.StreamListener(signature_field_name="answer"),
                dspy.streaming.StreamListener(signature_field_name="judgement"),
            ],
        )
        with dspy.context(lm=dspy.LM("openai/gpt-4o-mini", cache=False), adapter=dspy.XMLAdapter()):
            output = program(question="why did a chicken cross the kitchen?")
            all_chunks = []
            async for value in output:
                if isinstance(value, dspy.streaming.StreamResponse):
                    all_chunks.append(value)

    assert all_chunks[0].predict_name == "predict1"
    assert all_chunks[0].signature_field_name == "answer"
    assert all_chunks[0].chunk == "To get to the other side!"

    assert all_chunks[1].predict_name == "predict2"
    assert all_chunks[1].signature_field_name == "judgement"
    assert all_chunks[1].chunk == "The answer is humorous."
0 commit comments