Skip to content

Commit 0f91298

Browse files
authored
Merge pull request #353 from Zurnaz/llama_tpu_tokenizer_fix
fix: TPU tokenizer errors
2 parents cb4af7e + d53726b commit 0f91298

File tree

2 files changed

+3
-3
lines changed

2 files changed

+3
-3
lines changed

modeling/inference_models/hf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ def decode_wrapper(self, token_ids, *args, **kwargs):
             token_ids = [first]
         elif len(token_ids) > 0:
             first = int(token_ids[0])
-    elif token_ids:
+    elif token_ids is not None and len(token_ids) > 0:
         first = token_ids[0]
     result = original_decode(self, token_ids, *args, **kwargs)
     if first is not None and first in has_prefix_space:

modeling/inference_models/hf_mtj.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
     ModelCapabilities,
 )
 from modeling.inference_models.hf import HFInferenceModel
+from modeling.tokenizer import GenericTokenizer

 # This file shouldn't be imported unless using the TPU
 assert utils.koboldai_vars.use_colab_tpu
@@ -193,8 +194,7 @@ def _load(self, save_model: bool, initial_load: bool) -> None:
         utils.koboldai_vars.modeldim = int(
             tpu_mtj_backend.params.get("d_embed", tpu_mtj_backend.params["d_model"])
         )
-
-        self.tokenizer = tpu_mtj_backend.tokenizer
+        self.tokenizer = GenericTokenizer(tpu_mtj_backend.tokenizer)

         if (
             utils.koboldai_vars.badwordsids is koboldai_settings.badwordsids_default

0 commit comments

Comments
 (0)