-
Notifications
You must be signed in to change notification settings - Fork 2
/
affixes2analysis.py
95 lines (85 loc) · 3.16 KB
/
affixes2analysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# affixes2analysis.py:
# builds a lexc file for analysis out of a csv file of affixes
copyright = """Copyright © 2017, Kimmo Koskenniemi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re, csv, sys
import argparse
# NOTE: removed a duplicated "import argparse" line (it appeared twice).

# Command-line interface: two positional file arguments plus options
# controlling CSV parsing and output verbosity.
argparser = argparse.ArgumentParser(
    "python3 affixes2analysis.py",  # prog string; fixed typo ("affies")
    description="Converts an affix CSV file into an analyzing affix LEXC file")
argparser.add_argument(
    "infile", help="input CSV file containing the affix data")
argparser.add_argument(
    "outfile", help="output LEXC file of the affix data")
argparser.add_argument("-d", "--delimiter", default=",",
                       help="CSV field delimiter (default is ',')")
argparser.add_argument("-e", "--entry-mode", action="store_true",
                       help="include the continuation lexicon in the analysis")
argparser.add_argument("-v", "--verbosity", default=0, type=int,
                       help="level of diagnostic output")
args = argparser.parse_args()
# Global accumulators filled while the CSV rows are processed.
features = set()     # feature symbols such as "+PL" found in FEAT fields
multichars = set()   # brace-delimited multichar symbols found in MPHON fields


def collect_multichars(mphon):
    """Add every {...} multicharacter symbol in *mphon* to ``multichars``.

    Multichar symbols are brace-delimited substrings, e.g. "{aä}".
    A string shorter than two characters cannot contain one, so such
    input is skipped outright.
    """
    # Parameter renamed from "str", which shadowed the builtin.
    if len(mphon) < 2:
        return
    for mch in re.findall(r"[{][^}]+[}]", mphon):
        multichars.add(mch)
# --- Read the CSV and build the LEXC entry lines ------------------------
out_lst = []        # LEXC output lines, in order
features = set()    # re-initialized here in the original; kept for safety
nexts = set()       # NOTE(review): never used below — appears dead; verify

# Files are now opened with "with" so they are closed even on error
# (the input file was previously never closed at all).
with open(args.infile, "r") as infile:
    rdr = csv.DictReader(infile, delimiter=args.delimiter)
    prevID = ",,"   # impossible ID so the first data row starts a LEXICON
    for r in rdr:
        if args.verbosity >= 10:
            print(r)
        # Skip rows with no continuation lexicon or commented out with '!'.
        if r["NEXT"] == '' or r["NEXT"][0] == '!':
            continue
        # An empty ID field continues the previous LEXICON.
        ide = prevID if r["ID"] == '' else r["ID"]
        if prevID != ide:
            prevID = ide
            out_lst.append("LEXICON %s" % ide)
        collect_multichars(r["MPHON"])
        # With neither features nor a base form, the morphophonemic form
        # doubles as the base form; a lone "!" means an empty base form.
        if r['FEAT'] == '' and r['BASEF'] == '':
            r['BASEF'] = r['MPHON']
        if r['BASEF'] == "!":
            r['BASEF'] = ""
        if r['FEAT']:
            featlist = re.split(" +", r['FEAT'])
            feat_str = '+' + '+'.join(featlist)
            for feat in featlist:
                features.add("+" + feat)
        else:
            feat_str = ''
        # In entry mode, prefix the (escaped) lexicon name to the analysis.
        if "/" in ide and args.entry_mode:
            feat_str = "% " + ide + "%;" + feat_str
            features.add("% " + ide + "%;")
        # One LEXC entry per continuation lexicon listed in NEXT.
        # (Loop variable renamed from "next", which shadowed the builtin.)
        for cont in re.split(" +", r["NEXT"]):
            if cont:
                if r['BASEF'] + feat_str == r['MPHON']:
                    # Analysis and surface side are identical: short form.
                    out_lst.append("{}{} {};".format(
                        r['BASEF'], feat_str, cont))
                else:
                    out_lst.append("{}{}:{} {};".format(
                        r['BASEF'], feat_str, r['MPHON'], cont))

# --- Write the LEXC file ------------------------------------------------
# Multichar symbol declarations first, then the collected lexicons.
with open(args.outfile, "w") as outfile:
    print("Multichar_Symbols", file=outfile)
    print(" ".join(sorted(multichars)), file=outfile)
    print(" ".join(sorted(features)), file=outfile)
    for line in out_lst:
        print(line, file=outfile)