Skip to content

Commit db3c89b

Browse files
committed
Revert "[Misc]Use a platform independent interface to obtain the device attributes (vllm-project#17100)"
This reverts commit bdb2cdd.
1 parent 0b8eaec commit db3c89b

File tree

3 files changed

+4
-8
lines changed

3 files changed

+4
-8
lines changed

tests/conftest.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -287,8 +287,7 @@ class HfRunner:
287287
def get_default_device(self):
288288
from vllm.platforms import current_platform
289289

290-
return ("cpu"
291-
if current_platform.is_cpu() else current_platform.device_type)
290+
return ("cpu" if current_platform.is_cpu() else "cuda")
292291

293292
def wrap_device(self, x: _T, device: Optional[str] = None) -> _T:
294293
if x is None or isinstance(x, (bool, )):

tests/v1/sample/test_sampler.py

+1-3
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,14 @@
66
import pytest
77
import torch
88

9-
from vllm.platforms import current_platform
109
from vllm.utils import make_tensor_with_pad
1110
from vllm.v1.sample.metadata import SamplingMetadata
1211
from vllm.v1.sample.sampler import Sampler
1312

1413
VOCAB_SIZE = 1024
1514
NUM_OUTPUT_TOKENS = 20
1615
CUDA_DEVICES = [
17-
f"{current_platform.device_type}:{i}"
18-
for i in range(1 if current_platform.device_count() == 1 else 2)
16+
f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
1917
]
2018
MAX_NUM_PROMPT_TOKENS = 64
2119

vllm/worker/multi_step_model_runner.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
SamplerOutput,
1515
SamplingMetadata, get_logprobs,
1616
get_pythonized_sample_results)
17-
from vllm.platforms import current_platform
1817
from vllm.sequence import (CompletionSequenceGroupOutput, IntermediateTensors,
1918
Logprob, SequenceGroupMetadata, SequenceOutput)
2019
from vllm.utils import PyObjectCache, async_tensor_h2d, current_stream
@@ -159,8 +158,8 @@ class StatefulModelInput(BroadcastableModelInput):
159158
is_first_multi_step: bool = False
160159
base_output_proc_callback: Optional[Callable] = None
161160
# ping-pong data structures for multi-step to wait on the previous step
162-
step_cuda_events: List[current_platform.Event] = field(
163-
default_factory=lambda: [current_platform.Event(blocking=True)] * 2)
161+
step_cuda_events: List[torch.cuda.Event] = field(
162+
default_factory=lambda: [torch.cuda.Event(blocking=True)] * 2)
164163
num_seqs: int = -1
165164
num_queries: int = -1
166165
num_single_step_prefills: int = 0

0 commit comments

Comments (0)