Commit 7b12fd4

Move litellm cache testing under tests/clients and remove unnecessary tests (#8396)

* rename caching directory
* move files
* fix lint

1 parent 7a00238 · commit 7b12fd4

File tree

2 files changed: 0 additions, 63 deletions

tests/caching/example_cache/cache.db (deleted, −32 KB)

Binary file not shown.

tests/caching/test_litellm_cache.py renamed to tests/clients/test_litellm_cache.py

Lines changed: 0 additions & 63 deletions
@@ -1,5 +1,4 @@
 import importlib
-import os
 import shutil
 import tempfile
 from unittest.mock import patch
@@ -20,24 +19,6 @@ def temporary_blank_cache_dir(monkeypatch):
     dspy.configure_cache(enable_memory_cache=True, enable_disk_cache=True, enable_litellm_cache=False)
 
 
-@pytest.fixture()
-def temporary_populated_cache_dir(monkeypatch):
-    """
-    A DSPy cache directory populated with a response for the request with text "Example query"
-    to the model "openai/dspy-test-model".
-    """
-    module_dir = os.path.dirname(os.path.abspath(__file__))
-    populated_cache_path = os.path.join(module_dir, "example_cache")
-
-    with tempfile.TemporaryDirectory() as cache_dir_path:
-        shutil.copytree(populated_cache_path, cache_dir_path, dirs_exist_ok=True)
-        monkeypatch.setenv("DSPY_CACHEDIR", cache_dir_path)
-        importlib.reload(dspy.clients)
-        dspy.configure_cache(enable_memory_cache=True, enable_disk_cache=False, enable_litellm_cache=True)
-        yield cache_dir_path
-        dspy.configure_cache(enable_memory_cache=True, enable_disk_cache=True, enable_litellm_cache=False)
-
-
 def test_lm_calls_are_cached_across_lm_instances(litellm_test_server, temporary_blank_cache_dir):
     api_base, server_log_file_path = litellm_test_server
 
@@ -77,25 +58,6 @@ def test_lm_calls_are_cached_across_lm_instances(litellm_test_server, temporary_blank_cache_dir):
     assert len(request_logs) == 3
 
 
-def test_lm_calls_are_cached_across_interpreter_sessions(litellm_test_server, temporary_populated_cache_dir):
-    """
-    Verifies that LM calls are cached across interpreter sessions. Pytest test cases effectively
-    simulate separate interpreter sessions.
-    """
-    api_base, server_log_file_path = litellm_test_server
-
-    lm1 = dspy.LM(
-        model="openai/dspy-test-model",
-        api_base=api_base,
-        api_key="fakekey",
-        max_tokens=1000,
-    )
-    lm1("Example query")
-
-    request_logs = read_litellm_test_server_request_logs(server_log_file_path)
-    assert len(request_logs) == 0
-
-
 def test_lm_calls_are_cached_in_memory_when_expected(litellm_test_server, temporary_blank_cache_dir):
     api_base, server_log_file_path = litellm_test_server
 
@@ -132,31 +94,6 @@ class NonJsonSerializable:
     assert mock_litellm_completion.call_count == 2
 
 
-# def test_lm_calls_with_callables_are_cached_as_expected():
-#     with patch("litellm.completion") as mock_completion:
-#         lm_with_callable = dspy.LM(
-#             model="openai/dspy-test-model",
-#             api_base="fakebase",
-#             api_key="fakekey",
-#             # Define a callable kwarg for the LM to use during inference
-#             azure_ad_token_provider=lambda *args, **kwargs: None,
-#         )
-#         # Invoke the LM twice; the second call should be cached in memory
-#         lm_with_callable("Query")
-#         lm_with_callable("Query")
-
-#         # Define and invoke a nearly-identical LM that lacks the callable kwarg,
-#         # which should not hit the in-memory cache
-#         lm_without_callable = dspy.LM(
-#             model="openai/dspy-test-model",
-#             api_base="fakebase",
-#             api_key="fakekey",
-#         )
-#         lm_without_callable("Query")
-
-#         assert mock_completion.call_count == 2
-
-
 def test_lms_called_expected_number_of_times_for_cache_key_generation_failures():
     with pytest.raises(RuntimeError), patch("litellm.completion") as mock_completion:
         mock_completion.side_effect = RuntimeError("Mocked exception")
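For context, here is a minimal sketch of the cache behavior the surviving tests exercise, rebuilt only from calls visible in this diff. The endpoint URL and API key are hypothetical placeholders; a real run would need an OpenAI-compatible server (such as the litellm test server these tests use) listening at api_base.

import importlib
import os
import tempfile

import dspy

# Point DSPy's disk cache at a throwaway directory. The fixtures above do the
# same via monkeypatch.setenv, then reload dspy.clients so the new path is
# picked up.
os.environ["DSPY_CACHEDIR"] = tempfile.mkdtemp()
importlib.reload(dspy.clients)

# Enable the in-memory and on-disk caches; leave LiteLLM's own cache off,
# mirroring the temporary_blank_cache_dir fixture.
dspy.configure_cache(enable_memory_cache=True, enable_disk_cache=True, enable_litellm_cache=False)

lm = dspy.LM(
    model="openai/dspy-test-model",
    api_base="http://localhost:4000",  # hypothetical; tests get this from litellm_test_server
    api_key="fakekey",
    max_tokens=1000,
)
lm("Example query")  # first call goes to the server
lm("Example query")  # identical call should be answered from the cache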
