# main.py
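"""FastAPI service that exposes the tokenizers over HTTP:

GET  /tokenizers  -> tokenizer names grouped for the frontend
POST /tokenize    -> token ids (and matching text splits) for a given text
"""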
from pathlib import Path

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from src import BaseTokenizer, BPETokenizer, HGFBPETokenizer, TikTokenizer
from src.openai.tokenizer import tiktoken_models
# from utils.app_settings import available_tokenizers
from utils.settings import *

app = FastAPI()
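# Build one TikTokenizer per OpenAI model name exposed by tiktoken.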
openai_tokenizers = {
    name: TikTokenizer(model_name=name, special_tokens=CONTROL_TOKENS_LIST)
    for name in tiktoken_models
}
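# Local BPE tokenizers; each loads its files from DATA_FOLDER.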
tokenizers = {
    "custom_bpe": BPETokenizer(
        split_pattern=TOKEN_SPLIT_PATTERN,
        directory=DATA_FOLDER.joinpath("custom_bpe"),
        vocab_file=Path("vocab.json"),
        special_tokens=CONTROL_TOKENS_LIST,
    ),
    "hgface_bpe": HGFBPETokenizer(
        split_pattern=TOKEN_SPLIT_PATTERN,
        directory=DATA_FOLDER.joinpath("hgface_bpe"),
        vocab_file=Path("vocab.json"),
        model_file=Path("trainer.pkl"),
        special_tokens=CONTROL_TOKENS_LIST,
    ),
    # "tiktoken": TikTokenizer,  # special
}
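# Tokenizer choices grouped by origin ("custom" vs "openai"), shaped for a
# frontend select menu.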
tokenizer_options = [
    {
        "label": "custom",
        "options": [{"value": tok} for tok in tokenizers],
    },
    {
        "label": "openai",
        "options": [{"value": tok} for tok in openai_tokenizers],
    },
]
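# Merge custom and OpenAI tokenizers into a single lookup table.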
tokenizers = tokenizers | openai_tokenizers
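# Allow the browser frontend (e.g. a local dev server on port 3000) to call the API.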
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost.tiangolo.com",
        "https://localhost.tiangolo.com",
        "http://localhost",
        "http://localhost:3000",
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
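# Request body for /tokenize; "tokenizer" falls back to "hgface_bpe" when omitted.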
class Text2Encode(BaseModel):
    tokenizer: str | None = None
    text: str
# @app.get("/tokenizers")
# async def get_tokenizers():
#     tknzrs = list(tokenizers.keys())
#     return {"tokenizers": tknzrs}
@app.get("/tokenizers")
async def get_tokenizers():
    """Return the grouped tokenizer options for the frontend."""
    return tokenizer_options
@app.post("/tokenize")
async def tokenize_text(data: Text2Encode):
    """Encode the given text, returning token ids and the matching text splits."""
    tokenizer_name = data.tokenizer if data.tokenizer else "hgface_bpe"
    text = str(data.text)
    if len(text) == 0:
        return {"tokens": [], "words": [""]}
    tokenizer: BaseTokenizer = tokenizers.get(tokenizer_name)  # TODO: handle parameters automatically here
    if not tokenizer:
        return {"error": "tokenizer not implemented"}
    encoded = tokenizer.encode(text, retrieve_splitted_text=True, verbose=False)
    if isinstance(encoded[0], int):
        # The tokenizer returned a flat list of ids with no text splits.
        words = [""]
    else:
        words, encoded = zip(*encoded)
    return {"tokens": encoded, "words": words}
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
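# A minimal usage sketch (assuming the server is running on localhost:8000 and
# the "hgface_bpe" vocabulary files exist under DATA_FOLDER):
#
#   curl -X POST http://localhost:8000/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"tokenizer": "hgface_bpe", "text": "Hello world"}'
#
# Expected response shape: {"tokens": [...], "words": [...]}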