
Commit dd44b6f
fixed ruff misbehaving
ilsenatorov committed Aug 16, 2024
1 parent 697926b commit dd44b6f
Showing 6 changed files with 11 additions and 9 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -3,7 +3,7 @@ target-version = "py311"
 line-length = 119
 fix = true
 lint.select = ["E","F","W","B"]
-lint.ignore = ["E501", "F841", "F811"]
+lint.ignore = ["E501", "F841", "F811", "F401"]
 
 [tool.pytest.ini_options]
 testpaths = ["smtb/tests"]
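
The newly ignored rule, F401, is ruff's "imported but unused" check. The test files below import pytest fixtures purely so pytest can resolve them by name, which looks unused to ruff -- and with fix = true, ruff's autofix would delete those imports outright, which is plausibly the "misbehaving" in the commit title. A minimal sketch of the false positive (hypothetical test module, not from this repo):

    # Without "F401" in lint.ignore, ruff flags this import as unused and,
    # because fix = true, strips it on the next run -- breaking the test.
    from .fixtures import mock_data_dir  # referenced only via pytest's fixture injection

    def test_uses_fixture(mock_data_dir):  # pytest matches the argument name
        assert mock_data_dir is not None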
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,5 +1,5 @@
 torch
-pytorch-lightning
+lightning
 torchmetrics
 fair-esm
 jsonargparse
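
The pytorch-lightning distribution was renamed upstream to lightning; the newer package ships the same trainer API under a new top-level name. A quick sketch of the import change this implies for downstream code, assuming the usual alias:

    import lightning.pytorch as pl   # import path provided by the "lightning" package
    # import pytorch_lightning as pl  # legacy path from the old "pytorch-lightning" package

    trainer = pl.Trainer(max_epochs=1)  # same Trainer API under either name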
4 changes: 2 additions & 2 deletions smtb/__init__.py
@@ -1,3 +1,3 @@
-# from beartype.claw import beartype_this_package
+from beartype.claw import beartype_this_package
 
-# beartype_this_package()
+beartype_this_package()
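
Uncommenting these two lines activates beartype's import hook for the whole smtb package: every annotated function in the package now has its type hints enforced at call time. A minimal sketch of the effect, written with beartype's per-function decorator instead of the package-wide hook, on a hypothetical function:

    from beartype import beartype

    @beartype
    def scale(x: float, factor: float) -> float:
        return x * factor

    scale(2.0, 1.5)  # passes the runtime check
    scale("2", 1.5)  # raises a beartype violation at the call site instead of failing later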
1 change: 1 addition & 0 deletions smtb/tests/test_data.py
@@ -2,6 +2,7 @@
 import torch
 
 from ..data import DownstreamDataModule, DownstreamDataset
+from .fixtures import mock_data_dir
 
 
 def test_downstream_dataset(mock_data_dir):
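
This import (and the identical one in test_finetune.py below) is what the F401 ignore above protects: mock_data_dir is a pytest fixture defined in smtb/tests/fixtures.py, and it must be present in the test module's namespace for pytest to inject it, even though nothing references the name directly. A hypothetical sketch of the pattern -- only the fixture's name comes from the diff, its body is invented:

    # smtb/tests/fixtures.py (hypothetical body)
    import pytest

    @pytest.fixture
    def mock_data_dir(tmp_path):
        # lay out whatever minimal files DownstreamDataset expects under tmp_path
        return tmp_path

    # smtb/tests/test_data.py
    from .fixtures import mock_data_dir  # "unused" to ruff, required for fixture lookup

    def test_downstream_dataset(mock_data_dir):  # injected by matching the name
        assert mock_data_dir.exists()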
1 change: 1 addition & 0 deletions smtb/tests/test_finetune.py
@@ -19,6 +19,7 @@
 import pytest
 
 from ..train import train
+from .fixtures import mock_data_dir
 
 
 @pytest.fixture
10 changes: 5 additions & 5 deletions smtb/tokenization.py
@@ -3,21 +3,21 @@
 
 import transformers
 from tokenizers import Tokenizer
-from tokenizers.models import BPE, Model, Unigram, WordPiece
+from tokenizers.models import BPE, Unigram, WordPiece
 from tokenizers.pre_tokenizers import Whitespace
-from tokenizers.trainers import BpeTrainer, Trainer, UnigramTrainer, WordPieceTrainer
+from tokenizers.trainers import BpeTrainer, UnigramTrainer, WordPieceTrainer
 from transformers import PreTrainedTokenizerFast
 
 TOKENIZATION_TYPES = Literal["bpe", "wordpiece", "unigram", "char"]
 
 
 def _get_tokenizer(
-    model: Model,
-    trainer: Trainer,
+    model,
+    trainer,
     vocab_size: int,
     model_kwargs: dict | None = None,
     trainer_kwargs: dict | None = None,
-) -> Tokenizer:
+):
     if model_kwargs is None:
         model_kwargs = dict(unk_token="[UNK]")
     if trainer_kwargs is None:
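
Removing the Model and Trainer annotations (and their now-unused imports) in the same commit that switches beartype on suggests the hints did not match what callers actually pass -- for instance, a tokenizer model class rather than an instance -- so the newly active runtime checks would have started raising. That reading is an inference from the diff, not something it states. A hypothetical sketch of the kind of mismatch beartype surfaces, using the decorator form:

    from beartype import beartype
    from tokenizers.models import BPE, Model

    @beartype
    def _get_tokenizer(model: Model) -> None:  # simplified stand-in for the real signature
        ...

    _get_tokenizer(BPE())  # a Model instance: satisfies the annotation
    _get_tokenizer(BPE)    # the class itself: beartype raises at the call site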
