Skip to content

Commit

Permalink
fix(ci): resolve issues from SIM113
Browse files Browse the repository at this point in the history
  • Loading branch information
KarelZe committed Feb 15, 2024
1 parent 88f9f8f commit fc1637d
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 10 deletions.
13 changes: 8 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,11 @@ omit = [
]

[tool.ruff]

include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"]

[tool.ruff.lint]

# See rules: https://beta.ruff.rs/docs/rules/
select = [
"C", # flake8-comprehensions
Expand All @@ -106,8 +111,6 @@ select = [
"W", # pycodestyle warnings
]

include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"]


ignore = [
"E501", # line too long, handled by black
Expand All @@ -118,12 +121,12 @@ ignore = [
"W191", # tab indentation
]

[tool.ruff.isort]
[tool.ruff.lint.isort]
known-first-party = ["otc"]
section-order = ["future", "standard-library", "third-party", "first-party", "local-folder"]

[tool.ruff.per-file-ignores]
[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["D104", "F401"] # disable missing docstrings in __init__, unused imports

[tool.ruff.pydocstyle]
[tool.ruff.lint.pydocstyle]
convention = "google"
5 changes: 0 additions & 5 deletions src/otc/models/transformer_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,6 @@ def fit(
for epoch in range(self.epochs_finetune):
# perform training
loss_in_epoch_train = 0
train_batch = 0

self.clf.train()

Expand Down Expand Up @@ -498,15 +497,13 @@ def fit(

self._stats_step.append({"train_loss": train_loss.item(), "step": step})

train_batch += 1
step += 1

self.clf.eval()

loss_in_epoch_val = 0.0
correct = 0

val_batch = 0
with torch.no_grad():
for x_cat, x_cont, weights, targets in val_loader_finetune:
logits = self.clf(x_cat, x_cont)
Expand All @@ -524,8 +521,6 @@ def fit(
)
loss_in_epoch_val += val_loss.item()

val_batch += 1

# loss average over all batches
train_loss_all = loss_in_epoch_train / len(train_loader_finetune)
val_loss_all = loss_in_epoch_val / len(val_loader_finetune)
Expand Down

0 comments on commit fc1637d

Please sign in to comment.