"""
Grammalecte
Grammar checker engine
"""
import re
import traceback
import json
import importlib
#import unicodedata
from itertools import chain
from ..graphspell.spellchecker import SpellChecker
from ..graphspell.echo import echo
from .. import text
from . import gc_engine_func as gce_func
from . import gc_options
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_bWriterError = True
except ImportError:
_bWriterError = False
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getSpellChecker", \
#__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
# "load", "parse", "getSpellChecker", "getTextFormatter", "getLexicographer" \
"setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
"ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules", "setWriterUnderliningStyle" ]
# "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules", "setWriterUnderliningStyle" ]
__version__ = "${version}"
lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"
# Modules
_rules = None # module gc_rules
_rules_graph = None # module gc_rules_graph
# Tools
_sAppContext = ""  # what software is running
_dOptions = None
_oSpellChecker = None
_oTokenizer = None
_oLexicographer = None
# Data
_aIgnoredRules = set()
# Writer underlining style
_dOptionsColors = None
_bMulticolor = True
_nUnderliningStyle = 0
#### Initialization
def load (sContext="Python", sColorType="aRGB"):
"initialization of the grammar checker"
global _oSpellChecker
global _sAppContext
global _dOptions
global _dOptionsColors
global _oTokenizer
try:
_oSpellChecker = SpellChecker("${lang}", "${dic_main_filename_py}", "${dic_community_filename_py}", "${dic_personal_filename_py}")
_sAppContext = sContext
        _oSpellChecker.activateStorage()
        _dOptions = gc_options.getOptions(sContext).copy()  # duplication necessary, to be able to reset to default
        _oTokenizer = _oSpellChecker.getTokenizer()
        gce_func.load(sContext, _oSpellChecker)
        gc_options.load(sContext)
        _dOptionsColors = gc_options.getOptionsColors(sContext, sColorType)
except:
traceback.print_exc()
#### Tools
def getSpellChecker ():
"return the spellchecker object"
return _oSpellChecker
def getLexicographer ():
"load and return the lexicographer"
global _oLexicographer
if _oLexicographer is None:
lxg = importlib.import_module(".lexicographe", "grammalecte.${lang}")
_oLexicographer = lxg.Lexicographe(_oSpellChecker)
return _oLexicographer
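# Example (sketch): typical initialization and parsing through this module,
# assuming a built language package; the error-dict keys read below are
# illustrative assumptions, not guaranteed by this file.
#
#     from grammalecte.${lang} import gc_engine
#     gc_engine.load("Python")
#     for dErr in gc_engine.parse("Text to check."):
#         print(dErr.get("sRuleId"), dErr.get("sMessage"), dErr.get("aSuggestions"))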
#### Rules
def _getRules (bParagraph):
try:
if not bParagraph:
return _rules.lSentenceRules
︙
def reactivateRule (sRuleId):
"(re)activate rule <sRuleId>"
_aIgnoredRules.discard(sRuleId)
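# Example (sketch): enumerating rules and toggling one by its identifier;
# "some_rule_id" and the "esp" filter are hypothetical, real identifiers come
# from listRules() below.
#
#     for tRule in listRules("esp"):
#         print(tRule)
#     ignoreRule("some_rule_id")
#     reactivateRule("some_rule_id")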
def listRules (sFilter=None):
"generator: returns typle (sOption, sLineId, sRuleId)"
"generator: returns tuple (sRuleType, sOption, sLineId, sRuleId)"
if sFilter:
try:
zFilter = re.compile(sFilter)
except re.error:
echo("# Error. List rules: wrong regex.")
sFilter = None
# regex rules
︙
def displayRules (sFilter=None):
"display the name of rules, with the filter <sFilter>"
echo("List of rules. Filter: << " + str(sFilter) + " >>")
    for sRuleType, sOption, sLineId, sRuleId in listRules(sFilter):
        echo("{:<8} {:<10} {:<10} {}".format(sRuleType, sOption, sLineId, sRuleId))
#### Options
def setOption (sOpt, bVal):
"set option <sOpt> with <bVal> if it exists"
if sOpt in _dOptions:
_dOptions[sOpt] = bVal
def setOptions (dOpt):
"update the dictionary of options with <dOpt>"
for sKey, bVal in dOpt.items():
if sKey in _dOptions:
_dOptions[sKey] = bVal
def getOptions ():
"return the dictionary of current options"
return _dOptions
def getDefaultOptions ():
"return the dictionary of default options"
return gc_options.getOptions(_sAppContext).copy()
def getOptionsLabels (sLang):
"return options labels"
return gc_options.getUI(sLang)
def displayOptions (sLang="${lang}"):
"display the list of grammar checking options"
echo("Options:")
echo("\n".join( [ k+":\t"+str(v)+"\t"+gc_options.getUI(sLang).get(k, ("?", ""))[0] for k, v in sorted(_dOptions.items()) ] ))
echo("")
def resetOptions ():
"set options to default values"
global _dOptions
_dOptions = getDefaultOptions()
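# Example (sketch): reading and tweaking options; "typo" and "nbsp" are
# hypothetical option names, real names depend on the language pack.
#
#     setOption("typo", False)
#     setOptions({"nbsp": True})
#     print(getOptions())
#     resetOptions()    # restore the context defaults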
def setWriterUnderliningStyle (sStyle="BOLDWAVE", bMulticolor=True):
"set underlining style for Writer (WAVE, BOLDWAVE, BOLD)"
global _nUnderliningStyle
global _bMulticolor
# https://api.libreoffice.org/docs/idl/ref/FontUnderline_8idl.html
    # WAVE: 10, BOLD: 12, BOLDWAVE: 18, DASH: 5
︙
_nUnderliningStyle = 5
else:
_nUnderliningStyle = 0
_bMulticolor = bMulticolor
#### Parsing
def getParagraphErrors (sText, dOptions=None, bContext=False, bSpellSugg=False, bDebug=False):
"returns a tuple: (grammar errors, spelling errors)"
aGrammErrs = parse(sText, "FR", bDebug=bDebug, dOptions=dOptions, bContext=bContext)
aSpellErrs = _oSpellChecker.parseParagraph(sText, bSpellSugg)
return aGrammErrs, aSpellErrs
def getParagraphWithErrors (sText, dOptions=None, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100, bDebug=False):
"parse text and return a readable text with underline errors"
aGrammErrs, aSpellErrs = getParagraphErrors(sText, dOptions, False, bSpellSugg, bDebug)
if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
return ("", [])
return text.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
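# Example (sketch): a readable, underlined report for one paragraph; with
# bEmptyIfNoErrors, clean text produces no output.
#
#     print(getParagraphWithErrors("Text to check.", bSpellSugg=True, nWidth=80))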
def getParagraphErrorsAsJSON (iIndex, sText, dOptions=None, bContext=False, bEmptyIfNoErrors=False, bSpellSugg=False, bReturnText=False, lLineSet=None, bDebug=False):
"parse text and return errors as a JSON string"
aGrammErrs, aSpellErrs = getParagraphErrors(sText, dOptions, bContext, bSpellSugg, bDebug)
aGrammErrs = list(aGrammErrs)
if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
return ""
if lLineSet:
aGrammErrs, aSpellErrs = text.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
if bReturnText:
return json.dumps({ "iParagraph": iIndex, "sText": sText, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
return json.dumps({ "iParagraph": iIndex, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
"init point to analyse <sText> and returns an iterable of errors or (with option <bFullInfo>) paragraphs errors and sentences with tokens and errors"
oText = TextParser(sText)
return oText.parse(sCountry, bDebug, dOptions, bContext, bFullInfo)
︙
#for nPos, dToken in self.dTokenPos.items():
# s += "{}\t{}\n".format(nPos, dToken)
return s
def parse (self, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
"analyses <sText> and returns an iterable of errors or (with option <bFullInfo>) paragraphs errors and sentences with tokens and errors"
#sText = unicodedata.normalize("NFC", sText)
-        dOpt = dOptions or _dOptions
-        bShowRuleId = option('idrule')
+        dOpt = dOptions or gc_options.dOptions
+        bShowRuleId = gc_options.dOptions.get('idrule', False)
# parse paragraph
try:
self.parseText(self.sText, self.sText0, True, 0, sCountry, dOpt, bShowRuleId, bDebug, bContext)
except:
raise
if bFullInfo:
lParagraphErrors = list(self.dError.values())
|
︙
for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(sText):
bCondMemo = None
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
-                            bCondMemo = not sFuncCond or globals()[sFuncCond](sText, sText0, m, self.dTokenPos, sCountry, bCondMemo)
+                            bCondMemo = not sFuncCond or getattr(gce_func, sFuncCond)(sText, sText0, m, self.dTokenPos, sCountry, bCondMemo)
if bCondMemo:
if bDebug:
echo("RULE: " + sLineId)
if cActionType == "-":
# grammar error
nErrorStart = nOffset + m.start(eAct[0])
if nErrorStart not in self.dError or nPriority > self.dErrorPriority.get(nErrorStart, -1):
self.dError[nErrorStart] = self._createErrorFromRegex(sText, sText0, sWhat, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bShowRuleId, sOption, bContext)
self.dErrorPriority[nErrorStart] = nPriority
self.dSentenceError[nErrorStart] = self.dError[nErrorStart]
elif cActionType == "~":
# text processor
sText = self.rewriteText(sText, sWhat, eAct[0], m, bUppercase)
bChange = True
if bDebug:
echo("~ " + sText + " -- " + m.group(eAct[0]) + " # " + sLineId)
elif cActionType == "=":
# disambiguation
if not bParagraph:
-                                        globals()[sWhat](sText, m, self.dTokenPos)
+                                        getattr(gce_func, sWhat)(sText, m, self.dTokenPos)
if bDebug:
echo("= " + m.group(0) + " # " + sLineId)
elif cActionType == ">":
# we do nothing, this test is just a condition to apply all following actions
pass
else:
echo("# error: unknown action at " + sLineId)
|
︙
for sLineId, nextNodeKey in dNode.items():
bCondMemo = None
for sRuleId in dGraph[nextNodeKey]:
try:
if bDebug:
echo(" >TRY: " + sRuleId + " " + sLineId)
_, sOption, sFuncCond, cActionType, sWhat, *eAct = _rules_graph.dRule[sRuleId]
-                    # Suggestion    [ option, condition, "-", replacement/suggestion/action, iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, sURL ]
-                    # TextProcessor [ option, condition, "~", replacement/suggestion/action, iTokenStart, iTokenEnd, bCaseSvty ]
-                    # Disambiguator [ option, condition, "=", replacement/suggestion/action ]
-                    # Tag           [ option, condition, "/", replacement/suggestion/action, iTokenStart, iTokenEnd ]
-                    # Immunity      [ option, condition, "!", "", iTokenStart, iTokenEnd ]
-                    # Test          [ option, condition, ">", "" ]
+                    # Suggestion    [ sActionLineId, option, condition, "-", replacement/suggestion/action, iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, sURL ]
+                    # TextProcessor [ sActionLineId, option, condition, "~", replacement/suggestion/action, iTokenStart, iTokenEnd, bCaseSvty ]
+                    # Disambiguator [ sActionLineId, option, condition, "=", replacement/suggestion/action ]
+                    # Tag           [ sActionLineId, option, condition, "/", replacement/suggestion/action, iTokenStart, iTokenEnd ]
+                    # Immunity      [ sActionLineId, option, condition, "!", "", iTokenStart, iTokenEnd ]
+                    # Test          [ sActionLineId, option, condition, ">", "" ]
if not sOption or dOptions.get(sOption, False):
-                        bCondMemo = not sFuncCond or globals()[sFuncCond](self.lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, self.dTags, self.sSentence, self.sSentence0)
+                        bCondMemo = not sFuncCond or getattr(gce_func, sFuncCond)(self.lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, self.dTags, self.sSentence, self.sSentence0)
if bCondMemo:
if cActionType == "-":
# grammar error
iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, sURL = eAct
nTokenErrorStart = nTokenOffset + iTokenStart if iTokenStart > 0 else nLastToken + iTokenStart
if "bImmune" not in self.lToken[nTokenErrorStart]:
nTokenErrorEnd = nTokenOffset + iTokenEnd if iTokenEnd > 0 else nLastToken + iTokenEnd
|
︙
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
self._tagAndPrepareTokenForRewriting(sWhat, nTokenStart, nTokenEnd, nTokenOffset, nLastToken, eAct[2], bDebug)
bChange = True
if bDebug:
echo(" TEXT_PROCESSOR: [{}:{}] > {}".format(self.lToken[nTokenStart]["sValue"], self.lToken[nTokenEnd]["sValue"], sWhat))
elif cActionType == "=":
# disambiguation
-                                globals()[sWhat](self.lToken, nTokenOffset, nLastToken)
+                                getattr(gce_func, sWhat)(self.lToken, nTokenOffset, nLastToken)
if bDebug:
echo(" DISAMBIGUATOR: ({}) [{}:{}]".format(sWhat, self.lToken[nTokenOffset+1]["sValue"], self.lToken[nLastToken]["sValue"]))
elif cActionType == ">":
# we do nothing, this test is just a condition to apply all following actions
if bDebug:
echo(" COND_OK")
elif cActionType == "/":
|
︙
return bChange
def _createErrorFromRegex (self, sText, sText0, sRepl, nOffset, m, iGroup, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
nStart = nOffset + m.start(iGroup)
nEnd = nOffset + m.end(iGroup)
# suggestions
if sRepl[0:1] == "=":
-            sSugg = globals()[sRepl[1:]](sText, m)
+            sSugg = getattr(gce_func, sRepl[1:])(sText, m)
lSugg = sSugg.split("|") if sSugg else []
elif sRepl == "_":
lSugg = []
else:
lSugg = m.expand(sRepl).split("|")
if bUppercase and lSugg and m.group(iGroup)[0:1].isupper():
lSugg = list(map(lambda s: s[0:1].upper()+s[1:], lSugg))
# Message
-        sMessage = globals()[sMsg[1:]](sText, m) if sMsg[0:1] == "=" else m.expand(sMsg)
+        sMessage = getattr(gce_func, sMsg[1:])(sText, m) if sMsg[0:1] == "=" else m.expand(sMsg)
if bShowRuleId:
sMessage += " #" + sLineId + " / " + sRuleId
#
if _bWriterError:
return self._createErrorForWriter(nStart, nEnd - nStart, sRuleId, sOption, sMessage, lSugg, sURL)
return self._createErrorAsDict(nStart, nEnd, sLineId, sRuleId, sOption, sMessage, lSugg, sURL, bContext)
def _createErrorFromTokens (self, sSugg, nTokenOffset, nLastToken, iFirstToken, nStart, nEnd, sLineId, sRuleId, bCaseSvty, sMsg, sURL, bShowRuleId, sOption, bContext):
# suggestions
if sSugg[0:1] == "=":
-            sSugg = globals()[sSugg[1:]](self.lToken, nTokenOffset, nLastToken)
+            sSugg = getattr(gce_func, sSugg[1:])(self.lToken, nTokenOffset, nLastToken)
lSugg = sSugg.split("|") if sSugg else []
elif sSugg == "_":
lSugg = []
else:
lSugg = self._expand(sSugg, nTokenOffset, nLastToken).split("|")
if bCaseSvty and lSugg and self.lToken[iFirstToken]["sValue"][0:1].isupper():
lSugg = list(map(lambda s: s[0:1].upper()+s[1:], lSugg))
# Message
-        sMessage = globals()[sMsg[1:]](self.lToken, nTokenOffset, nLastToken) if sMsg[0:1] == "=" else self._expand(sMsg, nTokenOffset, nLastToken)
+        sMessage = getattr(gce_func, sMsg[1:])(self.lToken, nTokenOffset, nLastToken) if sMsg[0:1] == "=" else self._expand(sMsg, nTokenOffset, nLastToken)
if bShowRuleId:
sMessage += " #" + sLineId + " / " + sRuleId
#
if _bWriterError:
return self._createErrorForWriter(nStart, nEnd - nStart, sRuleId, sOption, sMessage, lSugg, sURL)
return self._createErrorAsDict(nStart, nEnd, sLineId, sRuleId, sOption, sMessage, lSugg, sURL, bContext)
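# Replacement-string conventions shared by both error builders above:
#   "=name" -> suggestions computed by gce_func.name ("name" is rule-defined),
#   "_"     -> no suggestion,
#   otherwise the template is expanded and "|" separates alternatives,
#   e.g. "le|la|les" yields three suggestions.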
︙
if sRepl == "*":
sNew = " " * nLen
elif sRepl == "_":
sNew = "_" * nLen
elif sRepl == "@":
sNew = "@" * nLen
elif sRepl[0:1] == "=":
-        sNew = globals()[sRepl[1:]](sText, m)
+        sNew = getattr(gce_func, sRepl[1:])(sText, m)
sNew = sNew + " " * (nLen-len(sNew))
if bUppercase and m.group(iGroup)[0:1].isupper():
sNew = sNew.capitalize()
else:
sNew = m.expand(sRepl)
sNew = sNew + " " * (nLen-len(sNew))
return sText[0:m.start(iGroup)] + sNew + sText[m.end(iGroup):]
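# Placeholders handled by rewriteText above; replacements are padded so the
# rewritten text keeps its length and error offsets stay valid:
#   "*" -> spaces, "_" -> underscores, "@" -> at-signs,
#   "=name" -> result of gce_func.name(sText, m), space-padded.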
︙
if nTokenRewriteEnd - nTokenRewriteStart == 0:
self.lToken[nTokenRewriteStart]["sNewValue"] = "_"
else:
for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
self.lToken[i]["sNewValue"] = "_"
else:
if sWhat.startswith("="):
-                sWhat = globals()[sWhat[1:]](self.lToken, nTokenOffset, nLastToken)
+                sWhat = getattr(gce_func, sWhat[1:])(self.lToken, nTokenOffset, nLastToken)
else:
sWhat = self._expand(sWhat, nTokenOffset, nLastToken)
bUppercase = bCaseSvty and self.lToken[nTokenRewriteStart]["sValue"][0:1].isupper()
if nTokenRewriteEnd - nTokenRewriteStart == 0:
# one token
if bUppercase:
sWhat = sWhat[0:1].upper() + sWhat[1:]
︙
except KeyError:
echo(self)
echo(dToken)
if bDebug:
echo(" TEXT REWRITED: " + self.sSentence)
self.lToken.clear()
self.lToken = lNewToken
#### common functions
def option (sOpt):
"return True if option <sOpt> is active"
return _dOptions.get(sOpt, False)
#### Functions to get text outside pattern scope
# warning: check compile_rules.py to understand how it works
_zNextWord = re.compile(r" +(\w[\w-]*)")
_zPrevWord = re.compile(r"(\w[\w-]*) +$")
def nextword (s, iStart, n):
"get the nth word of the input string or empty string"
m = re.match("(?: +[\\w%-]+){" + str(n-1) + "} +([\\w%-]+)", s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword (s, iEnd, n):
"get the (-)nth word of the input string or empty string"
m = re.search("([\\w%-]+) +(?:[\\w%-]+ +){" + str(n-1) + "}$", s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def nextword1 (s, iStart):
"get next word (optimization)"
m = _zNextWord.match(s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword1 (s, iEnd):
"get previous word (optimization)"
m = _zPrevWord.search(s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def look (s, sPattern, sNegPattern=None):
"seek sPattern in s (before/after/fulltext), if sNegPattern not in s"
if sNegPattern and re.search(sNegPattern, s):
return False
if re.search(sPattern, s):
return True
return False
def look_chk1 (dTokenPos, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=""):
"returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
m = re.search(sPattern, s)
if not m:
return False
try:
sWord = m.group(1)
nPos = m.start(1) + nOffset
except IndexError:
return False
return morph(dTokenPos, (nPos, sWord), sPatternGroup1, sNegPatternGroup1)
#### Analyse groups for regex rules
def displayInfo (dTokenPos, tWord):
"for debugging: retrieve info of word"
if not tWord:
echo("> nothing to find")
return True
lMorph = _oSpellChecker.getMorph(tWord[1])
if not lMorph:
echo("> not in dictionary")
return True
echo("TOKENS:", dTokenPos)
if tWord[0] in dTokenPos and "lMorph" in dTokenPos[tWord[0]]:
echo("DA: " + str(dTokenPos[tWord[0]]["lMorph"]))
echo("FSA: " + str(lMorph))
return True
def morph (dTokenPos, tWord, sPattern, sNegPattern="", bNoWord=False):
"analyse a tuple (position, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
if not tWord:
return bNoWord
lMorph = dTokenPos[tWord[0]]["lMorph"] if tWord[0] in dTokenPos and "lMorph" in dTokenPos[tWord[0]] else _oSpellChecker.getMorph(tWord[1])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
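# Example (sketch): checking the word at nPos; ":V" and ":N" are illustrative
# morphology tags, real tagsets depend on the dictionary.
#
#     morph(dTokenPos, (nPos, "porte"), ":V")          # has a verb reading?
#     morph(dTokenPos, (nPos, "porte"), ":V", ":N")    # a verb reading, but no noun reading?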
def analyse (sWord, sPattern, sNegPattern=""):
"analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
lMorph = _oSpellChecker.getMorph(sWord)
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
#### Analyse tokens for graph rules
def g_value (dToken, sValues, nLeft=None, nRight=None):
"test if <dToken['sValue']> is in sValues (each value should be separated with |)"
sValue = "|"+dToken["sValue"]+"|" if nLeft is None else "|"+dToken["sValue"][slice(nLeft, nRight)]+"|"
if sValue in sValues:
return True
if dToken["sValue"][0:2].istitle(): # we test only 2 first chars, to make valid words such as "Laissez-les", "Passe-partout".
if sValue.lower() in sValues:
return True
elif dToken["sValue"].isupper():
#if sValue.lower() in sValues:
# return True
sValue = "|"+sValue[1:].capitalize()
if sValue in sValues:
return True
sValue = sValue.lower()
if sValue in sValues:
return True
return False
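# Example (sketch): sValues is pipe-delimited with leading and trailing "|";
# nLeft/nRight slice the token value before comparison.
#
#     g_value(dToken, "|le|la|les|")
#     g_value(dToken, "|demi|", 0, 4)    # compare only the first four characters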
def g_morph (dToken, sPattern, sNegPattern="", nLeft=None, nRight=None, bMemorizeMorph=True):
"analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies"
if "lMorph" in dToken:
lMorph = dToken["lMorph"]
else:
if nLeft is not None:
lMorph = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)])
if bMemorizeMorph:
dToken["lMorph"] = lMorph
else:
lMorph = _oSpellChecker.getMorph(dToken["sValue"])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
def g_analyse (dToken, sPattern, sNegPattern="", nLeft=None, nRight=None, bMemorizeMorph=True):
"analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)"
if nLeft is not None:
lMorph = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)])
if bMemorizeMorph:
dToken["lMorph"] = lMorph
else:
lMorph = _oSpellChecker.getMorph(dToken["sValue"])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
def g_merged_analyse (dToken1, dToken2, cMerger, sPattern, sNegPattern="", bSetMorph=True):
"merge two token values, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)"
lMorph = _oSpellChecker.getMorph(dToken1["sValue"] + cMerger + dToken2["sValue"])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
bResult = all(zPattern.search(sMorph) for sMorph in lMorph)
if bResult and bSetMorph:
dToken1["lMorph"] = lMorph
return bResult
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
bResult = any(zPattern.search(sMorph) for sMorph in lMorph)
if bResult and bSetMorph:
dToken1["lMorph"] = lMorph
return bResult
def g_tag_before (dToken, dTags, sTag):
"returns True if <sTag> is present on tokens before <dToken>"
if sTag not in dTags:
return False
if dToken["i"] > dTags[sTag][0]:
return True
return False
def g_tag_after (dToken, dTags, sTag):
"returns True if <sTag> is present on tokens after <dToken>"
if sTag not in dTags:
return False
if dToken["i"] < dTags[sTag][1]:
return True
return False
def g_tag (dToken, sTag):
"returns True if <sTag> is present on token <dToken>"
return "aTags" in dToken and sTag in dToken["aTags"]
def g_meta (dToken, sType):
"returns True if <sType> is equal to the token type"
return dToken["sType"] == sType
def g_space_between_tokens (dToken1, dToken2, nMin, nMax=None):
"checks if spaces between tokens is >= <nMin> and <= <nMax>"
nSpace = dToken2["nStart"] - dToken1["nEnd"]
if nSpace < nMin:
return False
if nMax is not None and nSpace > nMax:
return False
return True
def g_token (lToken, i):
"return token at index <i> in lToken (or the closest one)"
if i < 0:
return lToken[0]
if i >= len(lToken):
return lToken[-1]
return lToken[i]
#### Disambiguator for regex rules
def select (dTokenPos, nPos, sWord, sPattern, lDefault=None):
"Disambiguation: select morphologies of <sWord> matching <sPattern>"
if not sWord:
return True
if nPos not in dTokenPos:
echo("Error. There should be a token at this position: ", nPos)
return True
lMorph = _oSpellChecker.getMorph(sWord)
if not lMorph or len(lMorph) == 1:
return True
lSelect = [ sMorph for sMorph in lMorph if re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(lMorph):
dTokenPos[nPos]["lMorph"] = lSelect
elif lDefault:
dTokenPos[nPos]["lMorph"] = lDefault
return True
def exclude (dTokenPos, nPos, sWord, sPattern, lDefault=None):
"Disambiguation: exclude morphologies of <sWord> matching <sPattern>"
if not sWord:
return True
if nPos not in dTokenPos:
echo("Error. There should be a token at this position: ", nPos)
return True
lMorph = _oSpellChecker.getMorph(sWord)
if not lMorph or len(lMorph) == 1:
return True
lSelect = [ sMorph for sMorph in lMorph if not re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(lMorph):
dTokenPos[nPos]["lMorph"] = lSelect
elif lDefault:
dTokenPos[nPos]["lMorph"] = lDefault
return True
def define (dTokenPos, nPos, lMorph):
"Disambiguation: set morphologies of token at <nPos> with <lMorph>"
if nPos not in dTokenPos:
echo("Error. There should be a token at this position: ", nPos)
return True
dTokenPos[nPos]["lMorph"] = lMorph
return True
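# Example (sketch): narrowing the readings of the token at nPos; the patterns
# and the forced morphology are illustrative.
#
#     select(dTokenPos, nPos, "porte", ":V")     # keep verb readings
#     exclude(dTokenPos, nPos, "porte", ":N")    # drop noun readings
#     define(dTokenPos, nPos, [":N:f:s"])        # force a reading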
#### Disambiguation for graph rules
def g_select (dToken, sPattern, lDefault=None):
"Disambiguation: select morphologies for <dToken> according to <sPattern>, always return True"
lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
if not lMorph or len(lMorph) == 1:
if lDefault:
dToken["lMorph"] = lDefault
#echo("DA:", dToken["sValue"], dToken["lMorph"])
return True
lSelect = [ sMorph for sMorph in lMorph if re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(lMorph):
dToken["lMorph"] = lSelect
elif lDefault:
dToken["lMorph"] = lDefault
#echo("DA:", dToken["sValue"], dToken["lMorph"])
return True
def g_exclude (dToken, sPattern, lDefault=None):
"Disambiguation: select morphologies for <dToken> according to <sPattern>, always return True"
lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
if not lMorph or len(lMorph) == 1:
if lDefault:
dToken["lMorph"] = lDefault
#echo("DA:", dToken["sValue"], dToken["lMorph"])
return True
lSelect = [ sMorph for sMorph in lMorph if not re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(lMorph):
dToken["lMorph"] = lSelect
elif lDefault:
dToken["lMorph"] = lDefault
#echo("DA:", dToken["sValue"], dToken["lMorph"])
return True
def g_add_morph (dToken, lNewMorph):
"Disambiguation: add a morphology to a token"
lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
lMorph.extend(lNewMorph)
dToken["lMorph"] = lMorph
return True
def g_define (dToken, lMorph):
"Disambiguation: set morphologies of <dToken>, always return True"
dToken["lMorph"] = lMorph
#echo("DA:", dToken["sValue"], lMorph)
return True
def g_define_from (dToken, nLeft=None, nRight=None):
"Disambiguation: set morphologies of <dToken> with slicing its value with <nLeft> and <nRight>"
if nLeft is not None:
dToken["lMorph"] = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)])
else:
dToken["lMorph"] = _oSpellChecker.getMorph(dToken["sValue"])
return True
def g_change_meta (dToken, sType):
"Disambiguation: change type of token"
dToken["sType"] = sType
return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
#### CALLABLES FOR REGEX RULES (generated code)
${callables}
#### CALLABLES FOR GRAPH RULES (generated code)
${graph_callables}