# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

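"""End-to-end tests for the OpenAI-compatible Translations API served by
vLLM (audio translation with Whisper models)."""
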
import io
import json
from unittest.mock import patch

import librosa
import numpy as np
import pytest
import soundfile as sf
from openai._base_client import AsyncAPIClient

from vllm.assets.audio import AudioAsset

from ...utils import RemoteOpenAIServer


@pytest.fixture
def foscolo():
    # Test translation it->en
    path = AudioAsset('azacinto_foscolo').get_local_path()
    with open(str(path), "rb") as f:
        yield f


# NOTE: (NickLucche) the large-v3-turbo model was not trained on translation!
@pytest.mark.asyncio
async def test_basic_audio(foscolo):
    model_name = "openai/whisper-small"
    server_args = ["--enforce-eager"]
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        client = remote_server.get_async_client()
        translation = await client.audio.translations.create(
            model=model_name,
            file=foscolo,
            response_format="text",
            # TODO remove once language detection is implemented
            extra_body=dict(language="it"),
            temperature=0.0)
        out = json.loads(translation)['text'].strip()
        assert "Nor will I ever touch the sacred" in out


@pytest.mark.asyncio
async def test_audio_prompt(foscolo):
    model_name = "openai/whisper-small"
    server_args = ["--enforce-eager"]
    # Condition Whisper on the starting text
    prompt = "Nor have I ever"
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        client = remote_server.get_async_client()
        translation = await client.audio.translations.create(
            model=model_name,
            file=foscolo,
            prompt=prompt,
            extra_body=dict(language="it"),
            response_format="text",
            temperature=0.0)
        out = json.loads(translation)['text']
        assert "Nor will I ever touch the sacred" not in out
        assert prompt not in out


@pytest.mark.asyncio
async def test_non_asr_model(foscolo):
    # Text-to-text model: the Translations API should reject it.
    model_name = "JackFram/llama-68m"
    server_args = ["--enforce-eager"]
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        client = remote_server.get_async_client()
        res = await client.audio.translations.create(
            model=model_name, file=foscolo, temperature=0.0)
        assert res.code == 400 and not res.text
        assert res.message == "The model does not support Translations API"


@pytest.mark.asyncio
async def test_streaming_response(foscolo):
    model_name = "openai/whisper-small"
    server_args = ["--enforce-eager"]
    translation = ""
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        client = remote_server.get_async_client()
        res_no_stream = await client.audio.translations.create(
            model=model_name,
            file=foscolo,
            response_format="json",
            extra_body=dict(language="it"),
            temperature=0.0)
        # The OpenAI client does not expose streaming for the Translations
        # API, so patch its post method to force streaming mode.
        original_post = AsyncAPIClient.post

        async def post_with_stream(*args, **kwargs):
            kwargs['stream'] = True
            return await original_post(*args, **kwargs)

        with patch.object(AsyncAPIClient, "post", new=post_with_stream):
            client = remote_server.get_async_client()
            res = await client.audio.translations.create(
                model=model_name,
                file=foscolo,
                temperature=0.0,
                extra_body=dict(stream=True, language="it"))
            # Reconstruct the full translation from the streamed chunks.
            async for chunk in res:
                # Each chunk carries a single delta of translated text.
                text = chunk.choices[0]['delta']['content']
                translation += text

        assert translation == res_no_stream.text


@pytest.mark.asyncio
async def test_stream_options(foscolo):
    model_name = "openai/whisper-small"
    server_args = ["--enforce-eager"]
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        original_post = AsyncAPIClient.post

        async def post_with_stream(*args, **kwargs):
            kwargs['stream'] = True
            return await original_post(*args, **kwargs)

        with patch.object(AsyncAPIClient, "post", new=post_with_stream):
            client = remote_server.get_async_client()
            res = await client.audio.translations.create(
                model=model_name,
                file=foscolo,
                temperature=0.0,
                extra_body=dict(language="it",
                                stream=True,
                                stream_include_usage=True,
                                stream_continuous_usage_stats=True))
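            # stream_include_usage should yield a final usage-only chunk
            # (empty choices); stream_continuous_usage_stats should attach
            # usage stats to every streamed chunk, as asserted below.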
            final = False
            continuous = True
            async for chunk in res:
                if not len(chunk.choices):
                    # final usage sent
                    final = True
                else:
                    continuous = continuous and hasattr(chunk, 'usage')
            assert final and continuous


@pytest.mark.asyncio
async def test_long_audio_request(foscolo):
    model_name = "openai/whisper-small"
    server_args = ["--enforce-eager"]

    foscolo.seek(0)
    audio, sr = librosa.load(foscolo)
    repeated_audio = np.tile(audio, 2)
    # Write the repeated audio to an in-memory WAV buffer.
    buffer = io.BytesIO()
    sf.write(buffer, repeated_audio, sr, format='WAV')
    buffer.seek(0)
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        client = remote_server.get_async_client()
        translation = await client.audio.translations.create(
            model=model_name,
            file=buffer,
            extra_body=dict(language="it"),
            response_format="text",
            temperature=0.0)
        out = json.loads(translation)['text'].strip().lower()
        # TODO: investigate higher model uncertainty for longer translations.
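        # The input audio was tiled twice, so the key phrase should appear
        # twice in the translation.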
        assert out.count("nor will i ever") == 2