import os
import tempfile
from typing import Dict, Any

import numpy as np
from numpy.typing import NDArray
import pytest
import onnxruntime
from unittest.mock import patch, MagicMock

from chromadb.utils.embedding_functions import ONNXMiniLM_L6_V2
from chromadb.utils.embedding_functions.embedding_function import (
    EmbeddingFunction,
)


class TestONNXMiniLM_L6_V2:
    """Test suite for the ONNXMiniLM_L6_V2 embedding function."""

    def test_initialization(self) -> None:
        """Test that the embedding function initializes correctly."""
        ef = ONNXMiniLM_L6_V2()
        assert ef is not None
        assert isinstance(ef, EmbeddingFunction)

        # Test with valid providers
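        # get_available_providers() lists the execution providers compiled into
        # this onnxruntime build; CPUExecutionProvider is normally among them,
        # so the branch below runs in typical environments.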
        available_providers = onnxruntime.get_available_providers()
        if available_providers:
            ef = ONNXMiniLM_L6_V2(preferred_providers=[available_providers[0]])
            assert ef is not None

        # Test with None providers
        ef = ONNXMiniLM_L6_V2(preferred_providers=None)
        assert ef is not None

    def test_embedding_shape_and_normalization(self) -> None:
        """Test that embeddings have the correct shape and are normalized."""
        ef = ONNXMiniLM_L6_V2()

        # Test with a single document
        docs = ["This is a test document"]
        embeddings = ef(docs)

        # Check shape and type
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1
        assert (
            len(embeddings[0]) == 384
        )  # MiniLM-L6-v2 produces 384-dimensional embeddings

        # Check normalization (for cosine similarity)
        embedding_np = np.array(embeddings[0])
        norm = np.linalg.norm(embedding_np)
        assert np.isclose(norm, 1.0, atol=1e-5)

        # Test with multiple documents
        docs = ["First document", "Second document", "Third document"]
        embeddings = ef(docs)

        # Check shape
        assert len(embeddings) == 3
        assert all(len(emb) == 384 for emb in embeddings)

    def test_batch_processing(self) -> None:
        """Test that the embedding function correctly processes batches."""
        ef = ONNXMiniLM_L6_V2()

        # Create a list of documents larger than the default batch size (32)
        docs = [f"Document {i}" for i in range(40)]

        # Get embeddings
        embeddings = ef(docs)

        # Check that all documents were processed
        assert len(embeddings) == 40
        assert all(len(emb) == 384 for emb in embeddings)

    def test_config_serialization(self) -> None:
        """Test that the embedding function can be serialized and deserialized."""
        # Create an embedding function with specific providers
        available_providers = onnxruntime.get_available_providers()
        providers = available_providers[:1] if available_providers else None
        ef = ONNXMiniLM_L6_V2(preferred_providers=providers)

        # Get config
        config = ef.get_config()

        # Check config
        assert isinstance(config, dict)
        assert "preferred_providers" in config

        # Build from config
        ef2 = ONNXMiniLM_L6_V2.build_from_config(config)

        # Check that the new instance works
        docs = ["Test document"]
        embeddings = ef2(docs)
        assert len(embeddings) == 1
        assert len(embeddings[0]) == 384

    def test_max_tokens(self) -> None:
        """Test the max_tokens method."""
        ef = ONNXMiniLM_L6_V2()
        assert ef.max_tokens() == 256  # Default for this model

    @patch("httpx.stream")
    def test_download_functionality(self, mock_stream: MagicMock) -> None:
        """Test the model download functionality with mocking."""
        # Set up mock response
        mock_response = MagicMock()
        mock_response.raise_for_status.return_value = None
        mock_response.headers.get.return_value = "1000"
        mock_response.iter_bytes.return_value = [b"test data"]
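        # httpx.stream() is used as a context manager, so configure the mock's
        # __enter__ to yield the fake response ("with httpx.stream(...) as resp:").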
        mock_stream.return_value.__enter__.return_value = mock_response

        # Create a temporary directory for testing
        with tempfile.TemporaryDirectory() as temp_dir:
            # Patch the download path
            with patch.object(ONNXMiniLM_L6_V2, "DOWNLOAD_PATH", temp_dir):
                with patch(
                    "chromadb.utils.embedding_functions.onnx_mini_lm_l6_v2._verify_sha256",
                    return_value=True,
                ):
                    ef = ONNXMiniLM_L6_V2()
                    # Call download method directly
                    ef._download(
                        url="https://test.url",
                        fname=os.path.join(temp_dir, "test_file"),
                    )

                    # Check that the file was created
                    assert os.path.exists(os.path.join(temp_dir, "test_file"))

    def test_validate_config(self) -> None:
        """Test config validation."""
        ef = ONNXMiniLM_L6_V2()

        # Test validate_config
        config: Dict[str, Any] = {"preferred_providers": ["CPUExecutionProvider"]}
        ef.validate_config(config)  # Should not raise

        # Test validate_config_update
        old_config: Dict[str, Any] = {"preferred_providers": ["CPUExecutionProvider"]}
        new_config: Dict[str, Any] = {"preferred_providers": ["CUDAExecutionProvider"]}
        ef.validate_config_update(old_config, new_config)  # Should not raise

    @pytest.mark.parametrize(
        "input_text",
        [
            "Short text",
            "A longer text that contains multiple words and should be embedded properly",
            "",  # Empty string
            "Special characters: !@#$%^&*()",
            "Numbers: 1234567890",
            "Unicode: 你好, こんにちは, 안녕하세요",
        ],
    )
    def test_various_inputs(self, input_text: str) -> None:
        """Test the embedding function with various types of input text."""
        ef = ONNXMiniLM_L6_V2()

        # Get embeddings
        embeddings = ef([input_text])

        # Check that embeddings were generated
        assert len(embeddings) == 1
        assert len(embeddings[0]) == 384

    def test_consistency(self) -> None:
        """Test that the embedding function produces consistent results."""
        ef = ONNXMiniLM_L6_V2()

        # Get embeddings for the same text twice
        text = "This is a test document"
        embeddings1 = ef([text])
        embeddings2 = ef([text])

        # Check that the embeddings are the same
        np.testing.assert_allclose(embeddings1[0], embeddings2[0])

    def test_similar_texts_have_similar_embeddings(self) -> None:
        """Test that similar texts have similar embeddings."""
        ef = ONNXMiniLM_L6_V2()

        # Get embeddings for similar texts
        text1 = "The cat sat on the mat"
        text2 = "A cat was sitting on a mat"
        text3 = "Quantum physics is fascinating"

        embeddings = ef([text1, text2, text3])

        # Calculate cosine similarities
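        # The embeddings are expected to be unit-normalized (see the
        # normalization test above), so the dot product alone would suffice;
        # dividing by the norms keeps the helper general.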
        def cosine_similarity(a: NDArray[np.float32], b: NDArray[np.float32]) -> float:
            return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

        # Similar texts should have higher similarity
        sim_1_2 = cosine_similarity(
            np.array(embeddings[0], dtype=np.float32),
            np.array(embeddings[1], dtype=np.float32),
        )
        sim_1_3 = cosine_similarity(
            np.array(embeddings[0], dtype=np.float32),
            np.array(embeddings[2], dtype=np.float32),
        )

        # The similarity between text1 and text2 should be higher than between text1 and text3
        assert sim_1_2 > sim_1_3