
Commit 387ce5c

update tokenizer regex (#58)
* update tokenizer regex
* keep word regex and just handle underscores
* update unit test
* remove square brackets in regex
1 parent 50ddb11 commit 387ce5c

2 files changed: 12 additions & 1 deletion


src/indexing/km_util.py

Lines changed: 11 additions & 0 deletions
@@ -57,6 +57,17 @@ def write_all_lines(path: str, items: 'list[str]') -> None:
 def get_tokens(text: str) -> 'list[str]':
     l_text = text.lower()
     tokens = tokenizer.tokenize(l_text)
+
+    # remove underscores
+    if '_' in text:
+        new_tokens = []
+
+        for token in tokens:
+            spl = token.split('_')
+            new_tokens.extend(spl)
+
+        tokens = new_tokens
+
     return tokens
 
 def sanitize_text(text: str) -> str:
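
For context, a minimal self-contained sketch of the updated get_tokens behavior. The module-level tokenizer is defined elsewhere in km_util.py and is not shown in this diff; the NLTK RegexpTokenizer and its word pattern below are assumptions used only to make the sketch runnable (the commit message only says the word regex is kept and square brackets were removed).

# Sketch only, not the exact module code.
# Assumption: the real tokenizer behaves like a word-pattern RegexpTokenizer.
from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r'\w+')  # hypothetical stand-in pattern

def get_tokens(text: str) -> 'list[str]':
    l_text = text.lower()
    tokens = tokenizer.tokenize(l_text)

    # \w+ keeps '_' inside tokens, so split each token on underscores
    # and flatten the pieces back into a single list
    if '_' in text:
        new_tokens = []
        for token in tokens:
            spl = token.split('_')
            new_tokens.extend(spl)
        tokens = new_tokens

    return tokens

print(get_tokens("The_quick brown fox"))
# -> ['the', 'quick', 'brown', 'fox'] with the stand-in pattern above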

src/tests/test_index_building.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ def delete_existing_index(data_dir):
     assert not os.path.exists(index_dir)
 
 def test_tokenization():
-    text = "The quick brown fox jumped over the lazy dog."
+    text = "The_quick brown fox jumped over the lazy dog."
 
     tokens = util.get_tokens(text)
     assert "the" in tokens
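With the underscore handling in place, "The_quick" splits into "the" and "quick" (assuming a word-style regex as in the sketch above), so the existing assertion that "the" is in the tokens still passes while the test now exercises the new branch.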
