3 files changed: +15 −5

@@ -818,14 +818,15 @@ def create_new_process_for_each_test(
 
     Args:
         method: The process creation method. Can be either "spawn" or "fork".
-            If not specified,
-            it defaults to "spawn" on ROCm platforms and "fork" otherwise.
+            If not specified, it defaults to "spawn" on ROCm and XPU
+            platforms and "fork" otherwise.
 
     Returns:
         A decorator to run test functions in separate processes.
     """
     if method is None:
-        method = "spawn" if current_platform.is_rocm() else "fork"
+        use_spawn = current_platform.is_rocm() or current_platform.is_xpu()
+        method = "spawn" if use_spawn else "fork"
 
     assert method in ["spawn",
                       "fork"], "Method must be either 'spawn' or 'fork'"
@@ -5,10 +5,10 @@
 
 from vllm import LLM, SamplingParams
 
-from ...utils import fork_new_process_for_each_test
+from ...utils import create_new_process_for_each_test
 
 
-@fork_new_process_for_each_test
+@create_new_process_for_each_test()
 @pytest.mark.parametrize("attn_backend",
                          ["FLASH_ATTN_VLLM_V1", "FLASHINFER_VLLM_V1"])
 def test_cascade_attention(example_system_message, monkeypatch, attn_backend):
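The call-site change matters: the old `fork_new_process_for_each_test` was a plain decorator, while `create_new_process_for_each_test` is a decorator factory taking an optional `method`, so it must be called (even with no arguments) to produce the actual decorator. A minimal sketch of that shape, with a hypothetical wrapper body since the real implementation is not part of this diff:

```python
import multiprocessing
from functools import wraps
from typing import Callable, Optional


def create_new_process_for_each_test(method: Optional[str] = None) -> Callable:
    """Return a decorator that runs the wrapped test in a fresh process."""

    def decorator(fn: Callable) -> Callable:

        @wraps(fn)
        def wrapper(*args, **kwargs):
            # Run the test body in a child process so accelerator state
            # cannot leak between tests.
            ctx = multiprocessing.get_context(method or "fork")
            p = ctx.Process(target=fn, args=args, kwargs=kwargs)
            p.start()
            p.join()
            assert p.exitcode == 0, f"{fn.__name__} failed in subprocess"

        return wrapper

    return decorator
```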
@@ -1535,6 +1535,13 @@ def cuda_is_initialized() -> bool:
     return torch.cuda.is_initialized()
 
 
+def xpu_is_initialized() -> bool:
+    """Check if XPU is initialized."""
+    if not torch.xpu._is_compiled():
+        return False
+    return torch.xpu.is_initialized()
+
+
 def cuda_get_device_properties(device,
                                names: Sequence[str],
                                init_cuda=False) -> tuple[Any, ...]:

@@ -2848,6 +2855,8 @@ def _maybe_force_spawn():
     reason = None
     if cuda_is_initialized():
         reason = "CUDA is initialized"
+    elif xpu_is_initialized():
+        reason = "XPU is initialized"
     elif is_in_ray_actor():
         # even if we choose to spawn, we need to pass the ray address
         # to the subprocess so that it knows how to connect to the ray cluster.
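Both hunks serve the same goal: a process that forks after CUDA or XPU is initialized inherits a broken copy of the accelerator context, so `_maybe_force_spawn` now treats a live XPU runtime the same as a live CUDA runtime. The guard on `torch.xpu._is_compiled()` makes the new helper safe on builds without XPU support, mirroring the existing CUDA helper. A rough sketch of the resulting control flow; the env-var handoff at the end is an assumption, since how vLLM consumes the decision is outside this diff:

```python
import os

import torch


def xpu_is_initialized() -> bool:
    # torch.xpu is importable even on CPU-only or CUDA-only builds, so check
    # _is_compiled() before asking whether the runtime is initialized.
    if not torch.xpu._is_compiled():
        return False
    return torch.xpu.is_initialized()


def maybe_force_spawn() -> None:
    reason = None
    if torch.cuda.is_initialized():
        reason = "CUDA is initialized"
    elif xpu_is_initialized():
        reason = "XPU is initialized"
    if reason is not None:
        # Hypothetical handoff to the worker launcher.
        os.environ["WORKER_MULTIPROC_METHOD"] = "spawn"
        print(f"Forcing spawn for worker processes: {reason}")
```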