Commit 76894a5

updating and cleaning
1 parent 9aee5f5 commit 76894a5

16 files changed (+1213 -276 lines)

app/main.py

Lines changed: 7 additions & 5 deletions
@@ -21,7 +21,8 @@
 from pathlib import Path
 
 import logging
-#from app.custom_logging import CustomizeLogger
+
+# from app.custom_logging import CustomizeLogger
 
 logger = logging.getLogger(__name__)
 config_path = Path(__file__).with_name("logging_config.json")
@@ -32,8 +33,8 @@
 app = FastAPI()
 app.mount("/static", StaticFiles(directory="./app/static"), name="static")
 
-#logger = CustomizeLogger.make_logger(config_path)
-#app.logger = logger
+# logger = CustomizeLogger.make_logger(config_path)
+# app.logger = logger
 
 app.include_router(create.router)
 
@@ -56,8 +57,9 @@
 
 
 @app.get("/")
-def root(request: Request,):
-
+def root(
+    request: Request,
+):
     return templates.TemplateResponse("login.html", {"request": request})
 
 
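The app/main.py changes above are formatting only: comment spacing, blank lines, and splitting the root() signature at its trailing comma. A minimal sketch of reproducing this kind of cleanup with the black formatter (an assumption; the commit message says only "updating and cleaning" and names no tool):

# Hypothetical reproduction of the formatting pass; using "black" is an assumption,
# not something the commit states.
import black

src = (
    'def root(request: Request,):\n'
    '    return templates.TemplateResponse("login.html", {"request": request})\n'
)
# The magic trailing comma after "Request," makes black explode the signature
# across several lines, matching the change to root() shown in the diff above.
print(black.format_str(src, mode=black.FileMode()))
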
app/routers/corpus.py

Lines changed: 47 additions & 33 deletions
@@ -13,87 +13,94 @@
 from functools import lru_cache
 import importlib
 from ..util.manage_corpus import make_corpus
+
 templates = Jinja2Templates(directory="app/templates")
 
 router = APIRouter(dependencies=[Depends(get_current_username)])
 
-Token = namedtuple("Token", ["text", "lemma_", "pos_", "ent_type_","is_stop"])
+Token = namedtuple("Token", ["text", "lemma_", "pos_", "ent_type_", "is_stop"])
+
 
 @router.get("/update_corpus")
 async def update_corpus(background_tasks: BackgroundTasks):
     background_tasks.add_task(make_corpus)
 
 
-
 ###########
 # Lemmata #
 ###########
 
+
 @router.get("/update_lemma")
-async def update_lemma(word:str,lemma:str):
+async def update_lemma(word: str, lemma: str):
     # load lemma file, needed because we update the file
     new_lang = Path.cwd() / "new_lang"
     lang_name = list(new_lang.iterdir())[0].name
     if len(list(new_lang.iterdir())) > 0:
         lookups_path = new_lang / lang_name / "lookups"
         for lookup in lookups_path.iterdir():
-            key = lookup.stem[lookup.stem.find('_') + 1:]
-            if 'lemma' in key:
+            key = lookup.stem[lookup.stem.find("_") + 1 :]
+            if "lemma" in key:
                 lemma_file = lookup
                 lemma_data = srsly.read_json(lookup)
 
-    # remove any accidental spaces
+    # remove any accidental spaces
     word = word.strip()
     lemma = lemma.strip()
     lemma_data[word] = lemma
     srsly.write_json(lemma_file, lemma_data)
 
+
 ###########
 # POS #
 ###########
 
+
 @router.get("/update_pos")
-async def update_pos(word:str,pos:str):
+async def update_pos(word: str, pos: str):
     # load lemma file, needed because we update the file
     new_lang = Path.cwd() / "new_lang"
     lang_name = list(new_lang.iterdir())[0].name
     if len(list(new_lang.iterdir())) > 0:
         lookups_path = new_lang / lang_name / "lookups"
         for lookup in lookups_path.iterdir():
-            key = lookup.stem[lookup.stem.find('_') + 1:]
-            if 'pos' in key:
+            key = lookup.stem[lookup.stem.find("_") + 1 :]
+            if "pos" in key:
                 pos_file = lookup
                 pos_data = srsly.read_json(lookup)
 
-    # remove any accidental spaces
+    # remove any accidental spaces
     word = word.strip()
     pos = pos.strip()
     pos_data[word] = pos
     srsly.write_json(pos_file, pos_data)
 
+
 ###########
 # POS #
 ###########
 
+
 @router.get("/update_features")
-async def update_features(word:str,features:str):
+async def update_features(word: str, features: str):
     # load lemma file, needed because we update the file
     new_lang = Path.cwd() / "new_lang"
     lang_name = list(new_lang.iterdir())[0].name
     if len(list(new_lang.iterdir())) > 0:
         lookups_path = new_lang / lang_name / "lookups"
         for lookup in lookups_path.iterdir():
-            key = lookup.stem[lookup.stem.find('_') + 1:]
-            if 'features' in key:
+            key = lookup.stem[lookup.stem.find("_") + 1 :]
+            if "features" in key:
                 features_file = lookup
                 features_data = srsly.read_json(lookup)
 
-    # remove any accidental spaces
+    # remove any accidental spaces
     word = word.strip()
     features = features.strip()
     features_data[word] = features
     srsly.write_json(features_file, features_data)
 
+
 ##############
 # Stop words #
 ##############
@@ -109,40 +116,42 @@ def load_stopwords():
     return STOP_WORDS
 
 
-def is_stop(word:str, STOP_WORDS:Set):
+def is_stop(word: str, STOP_WORDS: Set):
     if word in STOP_WORDS:
         return "☑"
-    else:
+    else:
         return "☐"
 
+
 @router.get("/add_stopword")
-def add_stopword(word:str):
+def add_stopword(word: str):
     new_lang = Path.cwd() / "new_lang"
     if len(list(new_lang.iterdir())) > 0:
         path = list(new_lang.iterdir())[0]
         path = path / "stop_words.py"
         text = path.read_text()
-        #Edit only the stopwords string, otherwise we could replace other sections of the file
+        # Edit only the stopwords string, otherwise we could replace other sections of the file
         start = text.find('"""')
        end = text.find('""".split()')
         stopwords = text[start:end]
         if not word in stopwords:
-            stopwords = stopwords + ' ' + word
+            stopwords = stopwords + " " + word
         text = text[:start] + stopwords + text[end:]
         path.write_text(text)
 
+
 @router.get("/delete_stopword")
-def delete_stopword(word:str):
+def delete_stopword(word: str):
     new_lang = Path.cwd() / "new_lang"
     if len(list(new_lang.iterdir())) > 0:
         path = list(new_lang.iterdir())[0]
         path = path / "stop_words.py"
         text = path.read_text()
-        #Edit only the stopwords string, otherwise we could replace other sections of the file
+        # Edit only the stopwords string, otherwise we could replace other sections of the file
         start = text.find('"""')
         end = text.find('""".split()')
         stopwords = text[start:end]
-        stopwords = re.sub(fr'\b{word}\b', '', stopwords)
+        stopwords = re.sub(rf"\b{word}\b", "", stopwords)
         text = text[:start] + stopwords + text[end:]
         path.write_text(text)
 
@@ -151,39 +160,44 @@ def delete_stopword(word:str):
 # Corpus Page #
 ###############
 
-#@lru_cache removing to work on 3.6, but is good
+
+# @lru_cache removing to work on 3.6, but is good
 def load_lookups():
     new_lang = Path.cwd() / "new_lang"
     if len(list(new_lang.iterdir())) > 0:
         lang_name = list(new_lang.iterdir())[0].name
         lookups_path = new_lang / lang_name / "lookups"
         for lookup in lookups_path.iterdir():
-            key = lookup.stem[lookup.stem.find('_') + 1:]
-            if 'lemma' in key:
+            key = lookup.stem[lookup.stem.find("_") + 1 :]
+            if "lemma" in key:
                 lemma_data = srsly.read_json(lookup)
-            if 'features' in key:
+            if "features" in key:
                 features_data = srsly.read_json(lookup)
-            if 'pos' in key:
+            if "pos" in key:
                 pos_data = srsly.read_json(lookup)
-        return lemma_data,features_data,pos_data
+        return lemma_data, features_data, pos_data
 
 
 @router.get("/corpus")
 async def read_items(request: Request):
-
     new_lang = Path.cwd() / "new_lang"
     if len(list(new_lang.iterdir())) > 0:
         lang_name = list(new_lang.iterdir())[0].name
         corpus_dir = new_lang / lang_name / "corpus_json"
-        tokens_json = srsly.read_json((corpus_dir / 'tokens.json'))
-        stats = srsly.read_json((corpus_dir / 'stats.json'))
+        tokens_json = srsly.read_json((corpus_dir / "tokens.json"))
+        stats = srsly.read_json((corpus_dir / "stats.json"))
         stats = srsly.json_loads(stats)
         nlp = get_nlp()
-        writing_system = nlp.vocab.writing_system['direction']
+        writing_system = nlp.vocab.writing_system["direction"]
 
         return templates.TemplateResponse(
             "corpus.html",
-            {"request": request, "stats": stats, "tokens_json": tokens_json, "writing_system":writing_system},
+            {
+                "request": request,
+                "stats": stats,
+                "tokens_json": tokens_json,
+                "writing_system": writing_system,
+            },
         )
     else:
         return templates.TemplateResponse(
app/routers/edit_code.py

Lines changed: 6 additions & 6 deletions
@@ -44,8 +44,9 @@ async def edit_code(
 
 
 @router.post("/edit")
-async def update_code(request: Request,):
-
+async def update_code(
+    request: Request,
+):
     data = await request.json()
     type = data["type"]
     code = data["code"]
@@ -63,17 +64,16 @@ async def update_code(request: Request,):
         "edit_code.html", {"request": request, "code": code}
     )
 
+
 @router.get("/check_logs")
 async def check_logs(
     request: Request,
-
 ):
-    context = {}
-    context['request'] = request
+    context = {}
+    context["request"] = request
     log_file = Path.cwd() / "error.log"
     if log_file.exists():
         context["code"] = log_file.read_text()
     else:
         raise HTTPException(status_code=404, detail="File not found")
     return templates.TemplateResponse("edit_code.html", context)
-
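A minimal usage sketch for the /check_logs route above, using FastAPI's test client (assumptions: the app object is the one created in app/main.py, and the placeholder credentials stand in for whatever authentication get_current_username applies, if this router uses it):

# Hypothetical usage of the /check_logs endpoint; the credentials and the import
# of app from app/main.py are assumptions, not part of this commit.
from fastapi.testclient import TestClient

from app.main import app

client = TestClient(app)
response = client.get("/check_logs", auth=("user", "password"))  # placeholder basic-auth credentials
if response.status_code == 404:
    print("error.log does not exist yet")  # check_logs raises 404 when the file is missing
else:
    print(response.text[:200])  # rendered edit_code.html containing the log text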