8
9
10
11
12
13
14
15
16
17
18
19
20
21
|
import json
import os
import grammalecte.ibdawg as ibdawg
from grammalecte.echo import echo
from grammalecte.str_transform import defineSuffixCode
import grammalecte.fr.conj as conj
class cd:
"""Context manager for changing the current working directory"""
def __init__ (self, newPath):
self.newPath = os.path.expanduser(newPath)
|
>
|
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
|
import json
import os
import grammalecte.ibdawg as ibdawg
from grammalecte.echo import echo
from grammalecte.str_transform import defineSuffixCode
import grammalecte.fr.conj as conj
import grammalecte.tokenizer as tkz
class cd:
"""Context manager for changing the current working directory"""
def __init__ (self, newPath):
self.newPath = os.path.expanduser(newPath)
|
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
|
def makeLocutions (sp, bJS=False):
    """Compile the list of locutions into a nested word-graph dict.

    Reads <sp>/data/locutions.txt (one locution per line, words separated by
    whitespace; leading ":"-words are tag leaves; "#" lines are comments) and
    writes the resulting dict as Python source to
    <sp>/modules/locutions_data.py. If bJS is True, the same data is also
    written as JSON to <sp>/modules-js/locutions_data.json.

    :param sp: path of the language data folder
    :param bJS: also generate the JavaScript (JSON) data file
    """
    print("> Locutions ", end="")
    print("(Python et JavaScript)" if bJS else "(Python seulement)")
    dLocutions = {}
    with open(sp+"/data/locutions.txt", 'r', encoding='utf-8') as hSrc:
        # iterate the file lazily instead of materializing it with readlines()
        for sLine in hSrc:
            if not sLine.startswith("#") and sLine.strip():
                lElem = sLine.strip().split()
                dCur = dLocutions
                for sWord in lElem:
                    # ordinary words open a new nested dict level;
                    # ":"-prefixed tags become empty-string leaves
                    if sWord not in dCur and not sWord.startswith(":"):
                        dCur[sWord] = {}
                    if sWord not in dCur and sWord.startswith(":"):
                        dCur[sWord] = ''
                    dCur = dCur[sWord]
    sCode = "# generated data (do not edit)\n\n" + \
            "dLocutions = " + str(dLocutions) + "\n"
    # use context managers so the generated files are closed/flushed even on error
    # (the original leaked the handles via bare open(...).write(...))
    with open(sp+"/modules/locutions_data.py", "w", encoding="utf-8", newline="\n") as hDst:
        hDst.write(sCode)
    if bJS:
        with open(sp+"/modules-js/locutions_data.json", "w", encoding="utf-8", newline="\n") as hDst:
            hDst.write(json.dumps(dLocutions, ensure_ascii=False))
def before (spLaunch, dVars, bJS=False):
    """Pre-build hook: generate the Hunspell dictionaries.

    :param spLaunch: launch/working directory path
    :param dVars: build variables dict (reads 'oxt_version')
    :param bJS: unused here; kept for hook-signature symmetry with after()
    """
    print("========== Build Hunspell dictionaries ==========")
    sOxtVersion = dVars['oxt_version']
    makeDictionaries(spLaunch, sOxtVersion)
def after (spLaunch, dVars, bJS=False):
    """Post-build hook: generate the French linguistic data files.

    Runs the data builders in their fixed order; each receives the launch
    path and the JS-output flag.

    :param spLaunch: launch/working directory path
    :param dVars: build variables dict (unused here)
    :param bJS: also generate the JavaScript data files
    """
    print("========== Build French data ==========")
    for fnBuild in (makeMfsp, makeConj, makePhonetTable, makeLocutions):
        fnBuild(spLaunch, bJS)
|
|
>
<
|
>
>
|
|
<
<
>
|
|
|
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
|
def makeLocutions (sp, bJS=False):
    """Compile the list of locutions into a tokenized word-graph dict.

    Reads <sp>/data/locutions.txt (one "locution<TAB>tag" entry per line;
    "#" lines are comments), tokenizes each locution with the French
    tokenizer, builds a nested dict keyed by token values with the tag
    stored under the ":" key at the leaf, and writes the result as Python
    source to <sp>/modules/locutions_data.py. If bJS is True, the same data
    is also written as JSON to <sp>/modules-js/locutions_data.json.

    :param sp: path of the language data folder
    :param bJS: also generate the JavaScript (JSON) data file
    """
    print("> Locutions ", end="")
    print("(Python et JavaScript)" if bJS else "(Python seulement)")
    dLocGraph = {}
    oTokenizer = tkz.Tokenizer("fr")
    with open(sp+"/data/locutions.txt", 'r', encoding='utf-8') as hSrc:
        # iterate the file lazily instead of materializing it with readlines()
        for sLine in hSrc:
            if not sLine.startswith("#") and sLine.strip():
                dCur = dLocGraph
                # NOTE(review): raises ValueError on lines with more than one
                # tab — assumed to be the intended data-validation behavior
                sLoc, sTag = sLine.strip().split("\t")
                for oToken in oTokenizer.genTokens(sLoc.strip()):
                    sWord = oToken["sValue"]
                    if sWord not in dCur:
                        dCur[sWord] = {}
                    dCur = dCur[sWord]
                # ":" marks the end of a locution and carries its tag
                dCur[":"] = sTag
    sCode = "# generated data (do not edit)\n\n" + \
            "dLocutions = " + str(dLocGraph) + "\n"
    # use context managers so the generated files are closed/flushed even on error
    # (the original leaked the handles via bare open(...).write(...))
    with open(sp+"/modules/locutions_data.py", "w", encoding="utf-8", newline="\n") as hDst:
        hDst.write(sCode)
    if bJS:
        with open(sp+"/modules-js/locutions_data.json", "w", encoding="utf-8", newline="\n") as hDst:
            hDst.write(json.dumps(dLocGraph, ensure_ascii=False))
def before (spLaunch, dVars, bJS=False):
    """Pre-build hook: generate the Hunspell dictionaries.

    :param spLaunch: launch/working directory path
    :param dVars: build variables dict (reads 'oxt_version')
    :param bJS: unused here; kept for hook-signature symmetry with after()
    """
    print("========== Build Hunspell dictionaries ==========")
    sOxtVersion = dVars['oxt_version']
    makeDictionaries(spLaunch, sOxtVersion)
def after (spLaunch, dVars, bJS=False):
    """Post-build hook: generate the French linguistic data files.

    Runs the data builders in their fixed order; each receives the launch
    path and the JS-output flag.

    :param spLaunch: launch/working directory path
    :param dVars: build variables dict (unused here)
    :param bJS: also generate the JavaScript data files
    """
    print("========== Build French data ==========")
    for fnBuild in (makeMfsp, makeConj, makePhonetTable, makeLocutions):
        fnBuild(spLaunch, bJS)
|