@@ -18,14 +18,18 @@
     s = re.sub(r"(select|exclude|define)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
     s = re.sub(r"(tag_before|tag_after)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
     s = re.sub(r"(switchGender|has(?:Mas|Fem)Form)[(]\\(\d+)", '\\1(lToken[\\2+nTokenOffset]["sValue"]', s)
     s = re.sub(r"(morph|analyse)\(>1", 'g_\\1(lToken[nLastToken+1]', s)                        # next token
     s = re.sub(r"(morph|analyse)\(<1", 'g_\\1(lToken[nTokenOffset]', s)                        # previous token
     s = re.sub(r"[\\](\d+)\.is(upper|lower|title)\(\)", 'lToken[\\1+nTokenOffset]["sValue"].is\\2()', s)
     s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
+    s = re.sub(r"before\(\s*", 'look(sSentence[:m.start()], ', s)                              # before(s)
+    s = re.sub(r"after\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)              # after(s)
+    s = re.sub(r"before0\(\s*", 'look(sSentence0[:m.start()], ', s)                            # before0(s)
+    s = re.sub(r"after0\(\s*", 'look(sSentence0[lToken[nLastToken]["nEnd"]:], ', s)            # after0(s)
     if bTokenValue:
         # token values are used as parameter
         s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)
     else:
         # tokens used as parameter
         s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]', s)
     return s
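These rewrites turn a rule condition written in the DSL into plain Python source: token references such as \1 become lToken lookups, and the newly added before()/after() helpers become look() calls over a slice of the rebuilt sentence (sSentence) or of the original sentence (sSentence0). A minimal sketch of the effect, using an invented condition string and only the substitutions shown in this hunk:

    import re

    # Hypothetical DSL condition (pattern and token numbers are made up):
    s = 'select(\\2, ":V") and before("très +$") and \\1 == "a"'

    s = re.sub(r"(select|exclude|define)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
    s = re.sub(r"before\(\s*", 'look(sSentence[:m.start()], ', s)
    s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)   # bTokenValue branch

    print(s)
    # g_select(lToken[2+nTokenOffset], ":V") and look(sSentence[:m.start()], "très +$")
    #     and lToken[1+nTokenOffset]["sValue"] == "a"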
@@ -332,14 +336,14 @@
     # creating file with all functions callable by rules
     print("  creating callables...")
     sPyCallables = "# generated code, do not edit\n"
     #sJSCallables = "// generated code, do not edit\nconst oEvalFunc = {\n"
     for sFuncName, sReturn in dFUNCTIONS.items():
         if sFuncName.startswith("g_c_"): # condition
-            sParams = "lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, dTags"
+            sParams = "lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, dTags, sSentence, sSentence0"
         elif sFuncName.startswith("g_m_"): # message
             sParams = "lToken, nTokenOffset"
         elif sFuncName.startswith("g_s_"): # suggestion
             sParams = "lToken, nTokenOffset"
         elif sFuncName.startswith("g_p_"): # preprocessor
             sParams = "lToken"
         elif sFuncName.startswith("g_d_"): # disambiguator
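Every g_c_ condition callable is now generated with sSentence and sSentence0 in its signature, which is what allows the look(sSentence[...], ...) code produced by the rewrites in the first hunk to resolve at runtime. A hedged sketch of what one emitted condition might look like (the function name, its body, and the look() stub are hypothetical; only the parameter list comes from this hunk):

    import re

    def look(sText, sPattern):
        # stand-in for the runtime helper: True if sPattern matches in sText
        return bool(re.search(sPattern, sText))

    # e.g. generated from a rule condition such as: after("^ +petit")
    def g_c_42(lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, dTags, sSentence, sSentence0):
        return look(sSentence[lToken[nLastToken]["nEnd"]:], "^ +petit")

    lToken = [{}, {"sValue": "un", "nEnd": 2}, {"sValue": "tout", "nEnd": 7}]
    print(g_c_42(lToken, 0, 2, "FR", None, {}, "un tout petit chien", "un tout petit chien"))   # True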