8 | 8 |
9 | 9 | import pytest
10 | 10 | import torch
| 11 | +from transformers import BitsAndBytesConfig
11 | 12 |
12 | 13 | from tests.quantization.utils import is_quant_method_supported
13 | 14 |
| 15 | +from ..models.utils import check_embeddings_close
14 | 16 | from ..utils import compare_two_settings, create_new_process_for_each_test
15 | 17 |
16 | 18 | models_4bit_to_test = [
19 | 21 |      "quantize inflight model with both HF and Mistral format weights")
20 | 22 | ]
21 | 23 |
| 24 | +models_4bit_to_embedding_test = [
| 25 | +    ("intfloat/e5-mistral-7b-instruct", "quantize embedding model inflight"),
| 26 | +]
| 27 | +
22 | 28 | models_pre_qaunt_4bit_to_test = [
23 | 29 |     ('PrunaAI/Einstein-v6.1-Llama3-8B-bnb-4bit-smashed',
24 | 30 |      'read pre-quantized 4-bit FP4 model'),
31 | 37 |     ("yec019/fbopt-350m-8bit", "read pre-quantized 8-bit opt model"),
32 | 38 | ]
33 | 39 |
| 40 | +models_pre_quant_8bit_to_test = [
| 41 | +    ('meta-llama/Llama-Guard-3-8B-INT8',
| 42 | +     'read pre-quantized llama 8-bit model'),
| 43 | +    ("yec019/fbopt-350m-8bit", "read pre-quantized 8-bit opt model"),
| 44 | +]
| 45 | +
34 | 46 |
35 | 47 | @pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"),
36 | 48 |                     reason='bitsandbytes is not supported on this GPU type.')
39 | 51 | def test_load_4bit_bnb_model(hf_runner, vllm_runner, example_prompts,
40 | 52 |                              model_name, description) -> None:
41 | 53 |
42 | | -    hf_model_kwargs = {"load_in_4bit": True}
| 54 | +    hf_model_kwargs = dict(quantization_config=BitsAndBytesConfig(
| 55 | +        load_in_4bit=True))
43 | 56 |     validate_generated_texts(hf_runner, vllm_runner, example_prompts[:1],
44 | 57 |                              model_name, False, hf_model_kwargs)
45 | 58 |
@@ -77,7 +90,8 @@ def test_load_8bit_bnb_model(hf_runner, vllm_runner, example_prompts,
77 | 90 | def test_load_tp_4bit_bnb_model(hf_runner, vllm_runner, example_prompts,
78 | 91 |                                 model_name, description) -> None:
79 | 92 |
80 | | -    hf_model_kwargs = {"load_in_4bit": True}
| 93 | +    hf_model_kwargs = dict(quantization_config=BitsAndBytesConfig(
| 94 | +        load_in_4bit=True))
81 | 95 |     validate_generated_texts(hf_runner,
82 | 96 |                              vllm_runner,
83 | 97 |                              example_prompts[:1],
@@ -113,6 +127,54 @@ def test_load_pp_4bit_bnb_model(model_name, description) -> None:
113 | 127 |     compare_two_settings(model_name, common_args, pp_args)
114 | 128 |
115 | 129 |
| 130 | +@pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"),
| 131 | +                    reason='bitsandbytes is not supported on this GPU type.')
| 132 | +@pytest.mark.parametrize("model_name, description",
| 133 | +                         models_4bit_to_embedding_test)
| 134 | +@pytest.mark.parametrize("dtype", ["half"])
| 135 | +@create_new_process_for_each_test()
| 136 | +def test_4bit_bnb_embedding_model(
| 137 | +    model_name,
| 138 | +    description,
| 139 | +    hf_runner,
| 140 | +    vllm_runner,
| 141 | +    example_prompts,
| 142 | +    dtype: str,
| 143 | +) -> None:
| 144 | +
| 145 | +    # The example_prompts end with "\n", for example:
| 146 | +    # "Write a short story about a robot that dreams for the first time.\n"
| 147 | +    # sentence_transformers strips the input texts, see:
| 148 | +    # https://github.com/UKPLab/sentence-transformers/blob/v3.1.1/sentence_transformers/models/Transformer.py#L159
| 149 | +    # This makes the input_ids differ between hf_model and vllm_model.
| 150 | +    # So we strip the input texts here to keep the two sides consistent.
| 151 | +    example_prompts = [str(s).strip() for s in example_prompts]
| 152 | +
| 153 | +    # Inflight 4bit quantization
| 154 | +    hf_model_kwargs = dict(quantization_config=BitsAndBytesConfig(
| 155 | +        load_in_4bit=True))
| 156 | +    with hf_runner(
| 157 | +            model_name,
| 158 | +            dtype=dtype,
| 159 | +            model_kwargs=hf_model_kwargs,
| 160 | +            is_sentence_transformer=True,
| 161 | +    ) as hf_model:
| 162 | +        hf_outputs = hf_model.encode(example_prompts)
| 163 | +
| 164 | +    with vllm_runner(model_name,
| 165 | +                     task="embed",
| 166 | +                     dtype=dtype,
| 167 | +                     quantization="bitsandbytes") as vllm_model:
| 168 | +        vllm_outputs = vllm_model.encode(example_prompts)
| 169 | +    check_embeddings_close(
| 170 | +        embeddings_0_lst=hf_outputs,
| 171 | +        embeddings_1_lst=vllm_outputs,
| 172 | +        name_0="hf",
| 173 | +        name_1="vllm",
| 174 | +        tol=5e-2,
| 175 | +    )
| 176 | +
| 177 | +
116 | 178 | def log_generated_texts(prompts, outputs, runner_name):
117 | 179 |     logged_texts = []
118 | 180 |     for i, (_, generated_text) in enumerate(outputs):
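
Note on the `hf_model_kwargs` change: replacing `{"load_in_4bit": True}` with an explicit `BitsAndBytesConfig` passed as `quantization_config` matches the Transformers API, where passing `load_in_4bit` directly to `from_pretrained` is deprecated. A minimal sketch of the pattern on the HF side; the model id here is a stand-in for illustration, not one of the test models:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Inflight 4-bit quantization: weights are quantized while the checkpoint
# loads, so no pre-quantized checkpoint is required.
quant_config = BitsAndBytesConfig(load_in_4bit=True)

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # stand-in model id for illustration only
    quantization_config=quant_config,
    torch_dtype=torch.float16,
)
```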
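For context on the new `test_4bit_bnb_embedding_model`: it checks that embeddings from a bitsandbytes-quantized model served by vLLM stay within `tol=5e-2` of the sentence-transformers reference. A rough standalone sketch of the vLLM side, assuming a vLLM build where `task="embed"` and `quantization="bitsandbytes"` can be combined (the capability this change adds tests for):

```python
from vllm import LLM

# Assumes bitsandbytes inflight quantization is supported for embedding
# models, which is what the new test covers.
llm = LLM(model="intfloat/e5-mistral-7b-instruct",
          task="embed",
          dtype="half",
          quantization="bitsandbytes")

# encode() returns one result per prompt; each carries an embedding vector.
outputs = llm.encode(["Write a short story about a robot."])
print(len(outputs[0].outputs.embedding))  # embedding dimensionality
```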