Skip to content

Commit c0f2e87

Browse files
committed
Formatting
1 parent 409a439 commit c0f2e87

File tree

6 files changed

+8
-8
lines changed

6 files changed

+8
-8
lines changed

.github/workflows/scripts/build.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ export MAX_JOBS=1
1212
# Make sure release wheels are built for the following architectures
1313
export PYTORCH_ROCM_ARCH="gfx90a;gfx942"
1414

15-
rm -f $(which sccache)
15+
rm -f "$(which sccache)"
1616

1717
export MAX_JOBS=32
1818

benchmarks/kernels/benchmark_mixtral_moe_rocm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ def run_grid(bs, model, TP):
266266
print(f"writing config to file {filename}")
267267
existing_content = {}
268268
if os.path.exists(filename):
269-
with open(filename, "r") as f:
269+
with open(filename) as f:
270270
existing_content = json.load(f)
271271
existing_content[str(bs)] = best_config
272272
with open(filename, "w") as f:

vllm/_custom_ops.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import contextlib
22
import functools
33
import importlib
4-
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union
4+
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
55

66
import torch
77
import torch.library
@@ -242,8 +242,8 @@ def scaled_rms_norm(out: torch.Tensor, input: torch.Tensor,
242242
def scaled_fused_add_rms_norm(out: torch.Tensor, input: torch.Tensor,
243243
residual: torch.Tensor, weight: torch.Tensor,
244244
scale: torch.Tensor, epsilon: float) -> None:
245-
torch.ops._C.fused_add_rms_norm_static_fp8_quant(out, input, residual, weight, scale,
246-
epsilon)
245+
torch.ops._C.fused_add_rms_norm_static_fp8_quant(out, input, residual,
246+
weight, scale, epsilon)
247247

248248

249249
def advance_step_flashattn(num_seqs: int, num_queries: int, block_size: int,

vllm/attention/backends/hpu_attn.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,7 @@ def forward(
141141
k_scale: float = 1.0,
142142
v_scale: float = 1.0,
143143
attn_type: AttentionType = AttentionType.DECODER,
144+
fp8_out_scale: Optional[torch.Tensor] = None,
144145
) -> torch.Tensor:
145146
"""Forward pass with xFormers and PagedAttention.
146147

vllm/model_executor/models/grok1.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
# coding=utf-8
21
# Adapted from
32
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
43
# Copyright 2023 The vLLM team.

vllm/utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ class _Sentinel:
158158
ALL_PINNED_SENTINEL = _Sentinel()
159159

160160

161-
class rpd_trace():
161+
class rpd_trace:
162162

163163
def __init__(self,
164164
filename=None,
@@ -244,7 +244,7 @@ def is_hipScopedMarker_available():
244244
return hipScopedMarker is not None
245245

246246

247-
class rpd_mark():
247+
class rpd_mark:
248248

249249
def __init__(self, name=None):
250250
self.name = name

0 commit comments

Comments (0)