Commit 267c1a1: "format"

1 parent: 4773c29

4 files changed (+5, -4 lines)

vllm/_custom_ops.py

Lines changed: 1 addition & 0 deletions

@@ -1104,6 +1104,7 @@ def register_graph_buffers(fa: int, handles: List[str],
                            offsets: List[List[int]]) -> None:
     torch.ops._C_custom_ar.register_graph_buffers(fa, handles, offsets)
 
+
 def allocate_meta_buffer(size: int) -> torch.Tensor:
     return torch.ops._C_custom_ar.allocate_meta_buffer(size)
 
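Note: this hunk is purely cosmetic, in line with the commit message. The inserted blank line restores the two blank lines that PEP 8 expects between top-level definitions. A minimal sketch of the spacing rule, with hypothetical function names:

# PEP 8 spacing: two blank lines separate top-level functions and classes;
# a single blank line only groups statements inside a body.
def first() -> None:
    pass


def second() -> None:
    pass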
vllm/config.py

Lines changed: 1 addition & 2 deletions

@@ -183,7 +183,6 @@ def compute_hash(self) -> str:
         factors.append(self.model)
         factors.append(self.dtype)
         factors.append(self.quantization)
-        factors.append(self.quantization_param_path)
         factors.append(self.revision)
         factors.append(self.code_revision)
         factors.append(self.trust_remote_code)
@@ -560,6 +559,7 @@ def _verify_quantization(self) -> None:
 
         # Detect which checkpoint is it
         for name in QUANTIZATION_METHODS:
+            from vllm.platforms import current_platform
             method = get_quantization_config(name)
             quantization_override = method.override_quantization_method(
                 quant_cfg, self.quantization)
@@ -1350,7 +1350,6 @@ def use_ray(self) -> bool:
     def _verify_args(self) -> None:
         # Lazy import to avoid circular import
         from vllm.executor.executor_base import ExecutorBase
-        from vllm.platforms import current_platform
         if self.distributed_executor_backend not in (
                 "ray", "mp", None) and not (isinstance(
                     self.distributed_executor_backend, type) and issubclass(
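Two separate cleanups land in this file. The first hunk drops quantization_param_path from the factors hashed by compute_hash. The other two hunks move the current_platform import out of _verify_args and into _verify_quantization, keeping it function-local; as the pre-existing "Lazy import to avoid circular import" comment indicates, deferring an import into a function body is the standard way to break an import cycle. A minimal two-module sketch of that pattern (hypothetical module names, not vLLM's actual layout):

# a.py
import b  # safe: b does not import a at module load time


def platform_name() -> str:
    return "rocm"


# b.py
def verify() -> str:
    # Lazy import: resolved at call time, after module a has finished
    # loading, so the a <-> b import cycle never bites.
    import a
    return a.platform_name()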

vllm/platforms/rocm.py

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 import os
 from functools import lru_cache, wraps
-from typing import TYPE_CHECKING, List, Dict, List, Optional
+from typing import TYPE_CHECKING, Dict, List, Optional
 
 import torch
 from amdsmi import (AmdSmiException, amdsmi_get_gpu_board_info,
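The typing import listed List twice. A duplicated name in an import list is harmless at runtime (the second occurrence simply rebinds the same object) but redundant, and it is the kind of thing linters flag; the fix removes the duplicate and leaves the names in alphabetical order (Dict, List, Optional).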

vllm/transformers_utils/config.py

Lines changed: 2 additions & 1 deletion

@@ -24,7 +24,8 @@
 # yapf: disable
 from vllm.transformers_utils.configs import (ChatGLMConfig, Cohere2Config,
                                              DbrxConfig, EAGLEConfig,
-                                             Grok1Config, ExaoneConfig, H2OVLChatConfig,
+                                             ExaoneConfig, Grok1Config,
+                                             H2OVLChatConfig,
                                              InternVLChatConfig, JAISConfig,
                                              MedusaConfig, MllamaConfig,
                                              MLPSpeculatorConfig, MPTConfig,
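This hunk only reorders and rewraps the import list: ExaoneConfig and Grok1Config swap into alphabetical order, and H2OVLChatConfig moves onto its own continuation line, presumably to satisfy the line-length limit. Because the block sits under # yapf: disable, that alignment is maintained by hand rather than by the formatter.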
