︙ | | | ︙ | |
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
|
except ImportError:
_bWriterError = False
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getSpellChecker", \
"setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
"ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules" ]
__version__ = "${version}"
lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
|
|
|
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
|
except ImportError:
_bWriterError = False
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getSpellChecker", \
"setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
"ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules", "setWriterUnderliningStyle" ]
__version__ = "${version}"
lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
|
︙ | | | ︙ | |
49
50
51
52
53
54
55
56
57
58
59
60
61
62
|
_sAppContext = "" # what software is running
_dOptions = None
_dOptionsColors = None
_oSpellChecker = None
_oTokenizer = None
_aIgnoredRules = set()
#### Initialization
def load (sContext="Python", sColorType="aRGB"):
"initialization of the grammar checker"
global _oSpellChecker
|
>
>
>
|
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
|
_sAppContext = "" # what software is running
_dOptions = None
_dOptionsColors = None
_oSpellChecker = None
_oTokenizer = None
_aIgnoredRules = set()
# Writer underlining style
_bMulticolor = True
_nUnderliningStyle = 0
#### Initialization
def load (sContext="Python", sColorType="aRGB"):
"initialization of the grammar checker"
global _oSpellChecker
|
︙ | | | ︙ | |
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
|
"generator: returns typle (sOption, sLineId, sRuleId)"
if sFilter:
try:
zFilter = re.compile(sFilter)
except re.error:
echo("# Error. List rules: wrong regex.")
sFilter = None
for sOption, lRuleGroup in chain(_getRules(True), _getRules(False)):
if sOption != "@@@@":
for _, _, sLineId, sRuleId, _, _ in lRuleGroup:
if not sFilter or zFilter.search(sRuleId):
yield (sOption, sLineId, sRuleId)
def displayRules (sFilter=None):
    "print the identifiers of all rules whose name matches <sFilter> (None = all)"
    echo("List of rules. Filter: << " + str(sFilter) + " >>")
    for tRule in listRules(sFilter):
        # tRule is (sOption, sLineId, sRuleId)
        echo("{:<10} {:<10} {}".format(*tRule))
#### Options
def setOption (sOpt, bVal):
"set option <sOpt> with <bVal> if it exists"
if sOpt in _dOptions:
|
>
|
>
>
>
>
>
|
|
|
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
|
"generator: returns typle (sOption, sLineId, sRuleId)"
if sFilter:
try:
zFilter = re.compile(sFilter)
except re.error:
echo("# Error. List rules: wrong regex.")
sFilter = None
# regex rules
for sOption, lRuleGroup in chain(_getRules(True), _getRules(False)):
if sOption != "@@@@":
for _, _, sLineId, sRuleId, _, _ in lRuleGroup:
if not sFilter or zFilter.search(sRuleId):
yield ("RegEx", sOption, sLineId, sRuleId)
# tokens rules
for sRuleName, lActions in _rules_graph.dRule.items():
sOption, _, cActionType, *_ = lActions
if cActionType == "-":
yield("Tokens", sOption, "", sRuleName)
def displayRules (sFilter=None):
    "display the name of rules, with the filter <sFilter>"
    echo("List of rules. Filter: << " + str(sFilter) + " >>")
    # listRules() yields 4-tuples (sType, sOption, sLineId, sRuleId).
    # The previous unpacking bound them in the wrong order (the rule type
    # landed in <sOption>, the rule id in <sType>, …); the printed columns
    # happened to come out right only because the format arguments were
    # permuted the same way. Unpack in the real order instead.
    for sType, sOption, sLineId, sRuleId in listRules(sFilter):
        echo("{:<8} {:<10} {:<10} {}".format(sType, sOption, sLineId, sRuleId))
#### Options
def setOption (sOpt, bVal):
"set option <sOpt> with <bVal> if it exists"
if sOpt in _dOptions:
|
︙ | | | ︙ | |
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
|
def getOptionsLabels (sLang):
    "return options labels"
    # Thin wrapper: delegates to gc_options.getUI(), which maps each option
    # name to its UI label data for language <sLang>.
    return gc_options.getUI(sLang)
def displayOptions (sLang):
    "print every grammar checking option with its current value and its label for <sLang>"
    echo("List of options")
    lLines = []
    for sOpt, bVal in sorted(_dOptions.items()):
        # getUI() is queried per option, as before; unknown options get "?"
        sLabel = gc_options.getUI(sLang).get(sOpt, ("?", ""))[0]
        lLines.append(sOpt + ":\t" + str(bVal) + "\t" + sLabel)
    echo("\n".join(lLines))
    echo("")
def resetOptions ():
    "set options to default values"
    global _dOptions
    # Replace the module-wide option dict wholesale with a fresh default copy,
    # discarding every option set via setOption()/setOptions().
    _dOptions = getDefaultOptions()
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
    "entry point: analyse <sText> and return an iterable of errors or (with <bFullInfo>) paragraph errors and sentences with tokens and errors"
    # Delegate straight to a throwaway TextParser instance.
    return TextParser(sText).parse(sCountry, bDebug, dOptions, bContext, bFullInfo)
|
|
|
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
|
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
|
def getOptionsLabels (sLang):
    "return options labels"
    # Thin wrapper: delegates to gc_options.getUI(), which maps each option
    # name to its UI label data for language <sLang>.
    return gc_options.getUI(sLang)
def displayOptions (sLang="${lang}"):
    "print every grammar checking option with its current value and its label for <sLang>"
    echo("Options:")
    lLines = []
    for sOpt, bVal in sorted(_dOptions.items()):
        # getUI() is queried per option, as before; unknown options get "?"
        sLabel = gc_options.getUI(sLang).get(sOpt, ("?", ""))[0]
        lLines.append(sOpt + ":\t" + str(bVal) + "\t" + sLabel)
    echo("\n".join(lLines))
    echo("")
def resetOptions ():
"set options to default values"
global _dOptions
_dOptions = getDefaultOptions()
# Map from style name to LibreOffice FontUnderline constant.
# https://api.libreoffice.org/docs/idl/ref/FontUnderline_8idl.html
# "WAVE" maps to 0 so that Writer keeps its own default underlining.
_dUnderliningStyles = { "WAVE": 0, "BOLDWAVE": 18, "BOLD": 12, "DASH": 5 }

def setWriterUnderliningStyle (sStyle="BOLDWAVE", bMulticolor=True):
    "set underlining style for Writer (WAVE, BOLDWAVE, BOLD, DASH) and whether errors are multicoloured"
    global _nUnderliningStyle
    global _bMulticolor
    # Dict dispatch replaces the old if/elif ladder; unknown style names
    # fall back to 0 (Writer default), exactly as before. The docstring now
    # also lists "DASH", which was always handled but undocumented.
    _nUnderliningStyle = _dUnderliningStyles.get(sStyle, 0)
    _bMulticolor = bMulticolor
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
    "entry point: analyse <sText> and return an iterable of errors or (with <bFullInfo>) paragraph errors and sentences with tokens and errors"
    # Delegate straight to a throwaway TextParser instance.
    return TextParser(sText).parse(sCountry, bDebug, dOptions, bContext, bFullInfo)
|
︙ | | | ︙ | |
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
|
self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lToken if dToken["sType"] != "INFO" }
if bDebug:
echo("UPDATE:")
echo(self)
def _getNextPointers (self, dToken, dGraph, dPointer, bDebug=False):
    "generator: return nodes where <dToken> “values” match <dNode> arcs"
    # A pointer is { "iNode1": index of the first token of the match,
    #               "dNode": the graph node reached so far } (plus "bKeep"
    # on pointers created by a jump arc, see below). For every arc of the
    # current node that this token matches, a new pointer on the target
    # node is yielded. Arc kinds are tried in order: literal value,
    # case variants, regex on value, lemma, regex on morphology, tag, meta.
    dNode = dPointer["dNode"]
    iNode1 = dPointer["iNode1"]
    bTokenFound = False
    # token value
    if dToken["sValue"] in dNode:
        if bDebug:
            echo(" MATCH: " + dToken["sValue"])
        yield { "iNode1": iNode1, "dNode": dGraph[dNode[dToken["sValue"]]] }
        bTokenFound = True
    if dToken["sValue"][0:2].istitle(): # we test only 2 first chars, to make valid words such as "Laissez-les", "Passe-partout".
        # titlecased token: also try its lowercased form
        sValue = dToken["sValue"].lower()
        if sValue in dNode:
            if bDebug:
                echo(" MATCH: " + sValue)
            yield { "iNode1": iNode1, "dNode": dGraph[dNode[sValue]] }
            bTokenFound = True
    elif dToken["sValue"].isupper():
        # all-caps token: also try its lowercased and capitalized forms
        sValue = dToken["sValue"].lower()
        if sValue in dNode:
            if bDebug:
                echo(" MATCH: " + sValue)
            yield { "iNode1": iNode1, "dNode": dGraph[dNode[sValue]] }
            bTokenFound = True
        sValue = dToken["sValue"].capitalize()
        if sValue in dNode:
            if bDebug:
                echo(" MATCH: " + sValue)
            yield { "iNode1": iNode1, "dNode": dGraph[dNode[sValue]] }
            bTokenFound = True
    # regex value arcs
    if dToken["sType"] not in frozenset(["INFO", "PUNC", "SIGN"]):
        if "<re_value>" in dNode:
            for sRegex in dNode["<re_value>"]:
                # "¬" separates the pattern from an optional anti-pattern
                if "¬" not in sRegex:
                    # no anti-pattern
                    if re.search(sRegex, dToken["sValue"]):
                        if bDebug:
                            echo(" MATCH: ~" + sRegex)
                        yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_value>"][sRegex]] }
                        bTokenFound = True
                else:
                    # there is an anti-pattern
                    sPattern, sNegPattern = sRegex.split("¬", 1)
                    if sNegPattern and re.search(sNegPattern, dToken["sValue"]):
                        continue
                    if not sPattern or re.search(sPattern, dToken["sValue"]):
                        if bDebug:
                            echo(" MATCH: ~" + sRegex)
                        yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_value>"][sRegex]] }
                        bTokenFound = True
    # analysable tokens
    if dToken["sType"][0:4] == "WORD":
        # token lemmas
        if "<lemmas>" in dNode:
            for sLemma in _oSpellChecker.getLemma(dToken["sValue"]):
                if sLemma in dNode["<lemmas>"]:
                    if bDebug:
                        echo(" MATCH: >" + sLemma)
                    yield { "iNode1": iNode1, "dNode": dGraph[dNode["<lemmas>"][sLemma]] }
                    bTokenFound = True
        # regex morph arcs
        if "<re_morph>" in dNode:
            # use the disambiguated morphologies when present, else ask the spellchecker
            lMorph = dToken.get("lMorph", _oSpellChecker.getMorph(dToken["sValue"]))
            for sRegex in dNode["<re_morph>"]:
                if "¬" not in sRegex:
                    # no anti-pattern
                    if any(re.search(sRegex, sMorph) for sMorph in lMorph):
                        if bDebug:
                            echo(" MATCH: @" + sRegex)
                        yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_morph>"][sRegex]] }
                        bTokenFound = True
                else:
                    # there is an anti-pattern
                    sPattern, sNegPattern = sRegex.split("¬", 1)
                    if sNegPattern == "*":
                        # all morphologies must match with <sPattern>
                        if sPattern:
                            if lMorph and all(re.search(sPattern, sMorph) for sMorph in lMorph):
                                if bDebug:
                                    echo(" MATCH: @" + sRegex)
                                yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_morph>"][sRegex]] }
                                bTokenFound = True
                    else:
                        if sNegPattern and any(re.search(sNegPattern, sMorph) for sMorph in lMorph):
                            continue
                        if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph):
                            if bDebug:
                                echo(" MATCH: @" + sRegex)
                            yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_morph>"][sRegex]] }
                            bTokenFound = True
    # token tags
    if "aTags" in dToken and "<tags>" in dNode:
        for sTag in dToken["aTags"]:
            if sTag in dNode["<tags>"]:
                if bDebug:
                    echo(" MATCH: /" + sTag)
                yield { "iNode1": iNode1, "dNode": dGraph[dNode["<tags>"][sTag]] }
                bTokenFound = True
    # meta arc (for token type)
    if "<meta>" in dNode:
        for sMeta in dNode["<meta>"]:
            # no regex here, we just search if <dNode["sType"]> exists within <sMeta>
            if sMeta == "*" or dToken["sType"] == sMeta:
                if bDebug:
                    echo(" MATCH: *" + sMeta)
                yield { "iNode1": iNode1, "dNode": dGraph[dNode["<meta>"][sMeta]] }
                bTokenFound = True
            elif "¬" in sMeta:
                if dToken["sType"] not in sMeta:
                    if bDebug:
                        echo(" MATCH: *" + sMeta)
                    yield { "iNode1": iNode1, "dNode": dGraph[dNode["<meta>"][sMeta]] }
                    bTokenFound = True
    # a "bKeep" pointer (created by a jump) survives even when nothing matched
    if not bTokenFound and "bKeep" in dPointer:
        yield dPointer
    # JUMP
    # Warning! Recursion!
    if "<>" in dNode:
        dPointer2 = { "iNode1": iNode1, "dNode": dGraph[dNode["<>"]], "bKeep": True }
        yield from self._getNextPointers(dToken, dGraph, dPointer2, bDebug)
def parseGraph (self, dGraph, sCountry="${country_default}", dOptions=None, bShowRuleId=False, bDebug=False, bContext=False):
    "parse graph with tokens from the text and execute actions encountered"
    # lPointer holds the partial matches still alive after the tokens seen so far.
    lPointer = []
    bTagAndRewrite = False
    for iToken, dToken in enumerate(self.lToken):
        if bDebug:
            echo("TOKEN: " + dToken["sValue"])
        # check arcs for each existing pointer
        lNextPointer = []
        for dPointer in lPointer:
            lNextPointer.extend(self._getNextPointers(dToken, dGraph, dPointer, bDebug))
        lPointer = lNextPointer
        # check arcs of first nodes
        # (a new match can also start at the graph root on this very token)
        lPointer.extend(self._getNextPointers(dToken, dGraph, { "iNode1": iToken, "dNode": dGraph[0] }, bDebug))
        # check if there is rules to check for each pointer
        for dPointer in lPointer:
            #if bDebug:
            #    echo("+", dPointer)
            if "<rules>" in dPointer["dNode"]:
                bChange = self._executeActions(dGraph, dPointer["dNode"]["<rules>"], dPointer["iNode1"]-1, iToken, dOptions, sCountry, bShowRuleId, bDebug, bContext)
                if bChange:
                    bTagAndRewrite = True
    # rewrite only once, after the whole graph pass, if any action tagged tokens
    if bTagAndRewrite:
        self.rewriteFromTags(bDebug)
    if bDebug:
        echo(self)
    return self.sSentence
|
|
|
|
|
|
|
|
|
|
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
|
self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lToken if dToken["sType"] != "INFO" }
if bDebug:
echo("UPDATE:")
echo(self)
def _getNextPointers (self, dToken, dGraph, dPointer, bDebug=False):
    "generator: return nodes where <dToken> “values” match <dNode> arcs"
    # A pointer is { "iToken1": index of the first token of the match,
    #               "iNode": index of the graph node reached so far } (plus
    # "bKeep" on pointers created by a jump arc, see below). For every arc
    # of the current node that this token matches, a new pointer on the
    # target node is yielded. Arc kinds are tried in order: literal value,
    # case variants, regex on value, lemma, morphology (plain substring),
    # morphology (regex), tag, meta.
    dNode = dGraph[dPointer["iNode"]]
    iToken1 = dPointer["iToken1"]
    bTokenFound = False
    # token value
    if dToken["sValue"] in dNode:
        if bDebug:
            echo(" MATCH: " + dToken["sValue"])
        yield { "iToken1": iToken1, "iNode": dNode[dToken["sValue"]] }
        bTokenFound = True
    if dToken["sValue"][0:2].istitle(): # we test only 2 first chars, to make valid words such as "Laissez-les", "Passe-partout".
        # titlecased token: also try its lowercased form
        sValue = dToken["sValue"].lower()
        if sValue in dNode:
            if bDebug:
                echo(" MATCH: " + sValue)
            yield { "iToken1": iToken1, "iNode": dNode[sValue] }
            bTokenFound = True
    elif dToken["sValue"].isupper():
        # all-caps token: also try its lowercased and capitalized forms
        sValue = dToken["sValue"].lower()
        if sValue in dNode:
            if bDebug:
                echo(" MATCH: " + sValue)
            yield { "iToken1": iToken1, "iNode": dNode[sValue] }
            bTokenFound = True
        sValue = dToken["sValue"].capitalize()
        if sValue in dNode:
            if bDebug:
                echo(" MATCH: " + sValue)
            yield { "iToken1": iToken1, "iNode": dNode[sValue] }
            bTokenFound = True
    # regex value arcs
    if dToken["sType"] not in frozenset(["INFO", "PUNC", "SIGN"]):
        if "<re_value>" in dNode:
            for sRegex in dNode["<re_value>"]:
                # "¬" separates the pattern from an optional anti-pattern
                if "¬" not in sRegex:
                    # no anti-pattern
                    if re.search(sRegex, dToken["sValue"]):
                        if bDebug:
                            echo(" MATCH: ~" + sRegex)
                        yield { "iToken1": iToken1, "iNode": dNode["<re_value>"][sRegex] }
                        bTokenFound = True
                else:
                    # there is an anti-pattern
                    sPattern, sNegPattern = sRegex.split("¬", 1)
                    if sNegPattern and re.search(sNegPattern, dToken["sValue"]):
                        continue
                    if not sPattern or re.search(sPattern, dToken["sValue"]):
                        if bDebug:
                            echo(" MATCH: ~" + sRegex)
                        yield { "iToken1": iToken1, "iNode": dNode["<re_value>"][sRegex] }
                        bTokenFound = True
    # analysable tokens
    if dToken["sType"][0:4] == "WORD":
        # token lemmas
        if "<lemmas>" in dNode:
            for sLemma in _oSpellChecker.getLemma(dToken["sValue"]):
                if sLemma in dNode["<lemmas>"]:
                    if bDebug:
                        echo(" MATCH: >" + sLemma)
                    yield { "iToken1": iToken1, "iNode": dNode["<lemmas>"][sLemma] }
                    bTokenFound = True
        # morph arcs
        # (plain substring containment, cheaper than the regex arcs below)
        if "<morph>" in dNode:
            # use the disambiguated morphologies when present, else ask the spellchecker
            lMorph = dToken.get("lMorph", _oSpellChecker.getMorph(dToken["sValue"]))
            if lMorph:
                for sSearch in dNode["<morph>"]:
                    if "¬" not in sSearch:
                        # no anti-pattern
                        if any(sSearch in sMorph for sMorph in lMorph):
                            if bDebug:
                                echo(" MATCH: $" + sSearch)
                            yield { "iToken1": iToken1, "iNode": dNode["<morph>"][sSearch] }
                            bTokenFound = True
                    else:
                        # there is an anti-pattern
                        sPattern, sNegPattern = sSearch.split("¬", 1)
                        if sNegPattern == "*":
                            # all morphologies must match with <sPattern>
                            if sPattern:
                                if all(sPattern in sMorph for sMorph in lMorph):
                                    if bDebug:
                                        echo(" MATCH: $" + sSearch)
                                    yield { "iToken1": iToken1, "iNode": dNode["<morph>"][sSearch] }
                                    bTokenFound = True
                        else:
                            if sNegPattern and any(sNegPattern in sMorph for sMorph in lMorph):
                                continue
                            if not sPattern or any(sPattern in sMorph for sMorph in lMorph):
                                if bDebug:
                                    echo(" MATCH: $" + sSearch)
                                yield { "iToken1": iToken1, "iNode": dNode["<morph>"][sSearch] }
                                bTokenFound = True
        # regex morph arcs
        if "<re_morph>" in dNode:
            lMorph = dToken.get("lMorph", _oSpellChecker.getMorph(dToken["sValue"]))
            if lMorph:
                for sRegex in dNode["<re_morph>"]:
                    if "¬" not in sRegex:
                        # no anti-pattern
                        if any(re.search(sRegex, sMorph) for sMorph in lMorph):
                            if bDebug:
                                echo(" MATCH: @" + sRegex)
                            yield { "iToken1": iToken1, "iNode": dNode["<re_morph>"][sRegex] }
                            bTokenFound = True
                    else:
                        # there is an anti-pattern
                        sPattern, sNegPattern = sRegex.split("¬", 1)
                        if sNegPattern == "*":
                            # all morphologies must match with <sPattern>
                            if sPattern:
                                if all(re.search(sPattern, sMorph) for sMorph in lMorph):
                                    if bDebug:
                                        echo(" MATCH: @" + sRegex)
                                    yield { "iToken1": iToken1, "iNode": dNode["<re_morph>"][sRegex] }
                                    bTokenFound = True
                        else:
                            if sNegPattern and any(re.search(sNegPattern, sMorph) for sMorph in lMorph):
                                continue
                            if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph):
                                if bDebug:
                                    echo(" MATCH: @" + sRegex)
                                yield { "iToken1": iToken1, "iNode": dNode["<re_morph>"][sRegex] }
                                bTokenFound = True
    # token tags
    if "aTags" in dToken and "<tags>" in dNode:
        for sTag in dToken["aTags"]:
            if sTag in dNode["<tags>"]:
                if bDebug:
                    echo(" MATCH: /" + sTag)
                yield { "iToken1": iToken1, "iNode": dNode["<tags>"][sTag] }
                bTokenFound = True
    # meta arc (for token type)
    if "<meta>" in dNode:
        for sMeta in dNode["<meta>"]:
            # no regex here, we just search if <dNode["sType"]> exists within <sMeta>
            if sMeta == "*" or dToken["sType"] == sMeta:
                if bDebug:
                    echo(" MATCH: *" + sMeta)
                yield { "iToken1": iToken1, "iNode": dNode["<meta>"][sMeta] }
                bTokenFound = True
            elif "¬" in sMeta:
                if dToken["sType"] not in sMeta:
                    if bDebug:
                        echo(" MATCH: *" + sMeta)
                    yield { "iToken1": iToken1, "iNode": dNode["<meta>"][sMeta] }
                    bTokenFound = True
    # a "bKeep" pointer (created by a jump) survives even when nothing matched
    if not bTokenFound and "bKeep" in dPointer:
        yield dPointer
    # JUMP
    # Warning! Recursion!
    if "<>" in dNode:
        dPointer2 = { "iToken1": iToken1, "iNode": dNode["<>"], "bKeep": True }
        yield from self._getNextPointers(dToken, dGraph, dPointer2, bDebug)
def parseGraph (self, dGraph, sCountry="${country_default}", dOptions=None, bShowRuleId=False, bDebug=False, bContext=False):
    "walk <dGraph> with the sentence tokens and execute every rule action reached"
    lActivePointers = []          # partial matches still alive
    bMustRewrite = False
    for iToken, dToken in enumerate(self.lToken):
        if bDebug:
            echo("TOKEN: " + dToken["sValue"])
        # advance every live pointer along the arcs this token matches
        lAdvanced = []
        for dPtr in lActivePointers:
            lAdvanced.extend(self._getNextPointers(dToken, dGraph, dPtr, bDebug))
        lActivePointers = lAdvanced
        # a new match may also start at the graph root on this very token
        lActivePointers.extend(self._getNextPointers(dToken, dGraph, { "iToken1": iToken, "iNode": 0 }, bDebug))
        # execute the actions of every pointer that reached a rule node
        for dPtr in lActivePointers:
            dNode = dGraph[dPtr["iNode"]]
            if "<rules>" in dNode:
                if self._executeActions(dGraph, dNode["<rules>"], dPtr["iToken1"]-1, iToken, dOptions, sCountry, bShowRuleId, bDebug, bContext):
                    bMustRewrite = True
    # rewrite only once, after the whole pass, if any action tagged tokens
    if bMustRewrite:
        self.rewriteFromTags(bDebug)
    if bDebug:
        echo(self)
    return self.sSentence
|
︙ | | | ︙ | |
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
|
self.dTags[sWhat] = [nTokenStart, nTokenStart]
else:
self.dTags[sWhat][0] = min(nTokenStart, self.dTags[sWhat][0])
self.dTags[sWhat][1] = max(nTokenEnd, self.dTags[sWhat][1])
elif cActionType == "!":
# immunity
if bDebug:
echo(" IMMUNITY: " + _rules_graph.dRule[sRuleId])
nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0]
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
if nTokenEnd - nTokenStart == 0:
self.lToken[nTokenStart]["bImmune"] = True
nErrorStart = self.nOffsetWithinParagraph + self.lToken[nTokenStart]["nStart"]
if nErrorStart in self.dError:
del self.dError[nErrorStart]
|
|
|
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
|
self.dTags[sWhat] = [nTokenStart, nTokenStart]
else:
self.dTags[sWhat][0] = min(nTokenStart, self.dTags[sWhat][0])
self.dTags[sWhat][1] = max(nTokenEnd, self.dTags[sWhat][1])
elif cActionType == "!":
# immunity
if bDebug:
echo(" IMMUNITY: " + sLineId + " / " + sRuleId)
nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0]
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
if nTokenEnd - nTokenStart == 0:
self.lToken[nTokenStart]["bImmune"] = True
nErrorStart = self.nOffsetWithinParagraph + self.lToken[nTokenStart]["nStart"]
if nErrorStart in self.dError:
del self.dError[nErrorStart]
|
︙ | | | ︙ | |
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
|
xErr.nErrorStart = nStart
xErr.nErrorLength = nLen
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sRuleId
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
xErr.aSuggestions = tuple(lSugg)
#xPropertyLineType = PropertyValue(Name="LineType", Value=5) # DASH or WAVE
xPropertyLineColor = PropertyValue(Name="LineColor", Value=_dOptionsColors.get(sOption, 33023))
if sURL:
xPropertyURL = PropertyValue(Name="FullCommentURL", Value=sURL)
xErr.aProperties = (xPropertyURL, xPropertyLineColor)
else:
xErr.aProperties = (xPropertyLineColor,)
return xErr
def _createErrorAsDict (self, nStart, nEnd, sLineId, sRuleId, sOption, sMessage, lSugg, sURL, bContext):
dErr = {
"nStart": nStart,
"nEnd": nEnd,
"sLineId": sLineId,
|
>
>
>
|
>
|
|
<
<
|
|
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
|
xErr.nErrorStart = nStart
xErr.nErrorLength = nLen
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sRuleId
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
xErr.aSuggestions = tuple(lSugg)
# Properties
lProperties = []
if _nUnderliningStyle:
lProperties.append(PropertyValue(Name="LineType", Value=_nUnderliningStyle))
if _bMulticolor:
lProperties.append(PropertyValue(Name="LineColor", Value=_dOptionsColors.get(sOption, 33023)))
if sURL:
lProperties.append(PropertyValue(Name="FullCommentURL", Value=sURL))
xErr.aProperties = lProperties
return xErr
def _createErrorAsDict (self, nStart, nEnd, sLineId, sRuleId, sOption, sMessage, lSugg, sURL, bContext):
dErr = {
"nStart": nStart,
"nEnd": nEnd,
"sLineId": sLineId,
|
︙ | | | ︙ | |
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
|
if bUppercase:
sWhat = sWhat[0:1].upper() + sWhat[1:]
self.lToken[nTokenRewriteStart]["sNewValue"] = sWhat
else:
# several tokens
lTokenValue = sWhat.split("|")
if len(lTokenValue) != (nTokenRewriteEnd - nTokenRewriteStart + 1):
echo("Error. Text processor: number of replacements != number of tokens.")
return
for i, sValue in zip(range(nTokenRewriteStart, nTokenRewriteEnd+1), lTokenValue):
if not sValue or sValue == "*":
self.lToken[i]["bToRemove"] = True
else:
if bUppercase:
sValue = sValue[0:1].upper() + sValue[1:]
|
>
|
|
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
|
if bUppercase:
sWhat = sWhat[0:1].upper() + sWhat[1:]
self.lToken[nTokenRewriteStart]["sNewValue"] = sWhat
else:
# several tokens
lTokenValue = sWhat.split("|")
if len(lTokenValue) != (nTokenRewriteEnd - nTokenRewriteStart + 1):
if (bDebug):
echo("Error. Text processor: number of replacements != number of tokens.")
return
for i, sValue in zip(range(nTokenRewriteStart, nTokenRewriteEnd+1), lTokenValue):
if not sValue or sValue == "*":
self.lToken[i]["bToRemove"] = True
else:
if bUppercase:
sValue = sValue[0:1].upper() + sValue[1:]
|
︙ | | | ︙ | |
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
|
return False
def g_tag (dToken, sTag):
    "return True when tag <sTag> has been set on token <dToken>"
    if "aTags" not in dToken:
        return False
    return sTag in dToken["aTags"]
def g_space_between_tokens (dToken1, dToken2, nMin, nMax=None):
"checks if spaces between tokens is >= <nMin> and <= <nMax>"
nSpace = dToken2["nStart"] - dToken1["nEnd"]
if nSpace < nMin:
return False
if nMax is not None and nSpace > nMax:
|
>
>
>
>
>
|
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
|
return False
def g_tag (dToken, sTag):
    "return True when tag <sTag> has been set on token <dToken>"
    if "aTags" not in dToken:
        return False
    return sTag in dToken["aTags"]
def g_meta (dToken, sType):
    "returns True if <sType> is equal to the token type"
    # Strict equality test against the token's "sType" field (no regex,
    # unlike the "¬"-style meta arcs of the graph walker).
    return dToken["sType"] == sType
def g_space_between_tokens (dToken1, dToken2, nMin, nMax=None):
"checks if spaces between tokens is >= <nMin> and <= <nMax>"
nSpace = dToken2["nStart"] - dToken1["nEnd"]
if nSpace < nMin:
return False
if nMax is not None and nSpace > nMax:
|
︙ | | | ︙ | |
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
|
dTokenPos[nPos]["lMorph"] = lMorph
return True
#### Disambiguation for graph rules
def g_select (dToken, sPattern, lDefault=None):
    "keep only the morphologies of <dToken> matching <sPattern>; always return True"
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if len(lMorph) > 1:
        # keep the matching morphologies…
        lKept = []
        for sMorph in lMorph:
            if re.search(sPattern, sMorph):
                lKept.append(sMorph)
        if not lKept:
            # …nothing matched: fall back on <lDefault> when given
            if lDefault:
                dToken["lMorph"] = lDefault
        elif len(lKept) < len(lMorph):
            dToken["lMorph"] = lKept
    elif lDefault:
        # zero or one morphology: nothing to narrow, just apply the default
        dToken["lMorph"] = lDefault
    return True
def g_exclude (dToken, sPattern, lDefault=None):
    "discard the morphologies of <dToken> matching <sPattern>, always return True"
    # Docstring fixed: it was copy-pasted from g_select() and claimed to
    # *select* matching morphologies, while this function removes them.
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if not lMorph or len(lMorph) == 1:
        # zero or one morphology: nothing to narrow, just apply the default
        if lDefault:
            dToken["lMorph"] = lDefault
        #echo("DA:", dToken["sValue"], dToken["lMorph"])
        return True
    lSelect = [ sMorph for sMorph in lMorph if not re.search(sPattern, sMorph) ]
    if lSelect:
        if len(lSelect) != len(lMorph):
            dToken["lMorph"] = lSelect
    elif lDefault:
        # every morphology was excluded: fall back on <lDefault>
        dToken["lMorph"] = lDefault
    #echo("DA:", dToken["sValue"], dToken["lMorph"])
    return True
def g_define (dToken, lMorph):
    "set morphologies of <dToken>, always return True"
    # Unconditionally overwrite the token's morphology list with <lMorph>.
    dToken["lMorph"] = lMorph
    #echo("DA:", dToken["sValue"], lMorph)
    return True
def g_define_from (dToken, nLeft=None, nRight=None):
    "set morphologies of <dToken> with slicing its value with <nLeft> and <nRight>"
    sValue = dToken["sValue"]
    if nLeft is not None:
        # NOTE(review): <nRight> only takes effect when <nLeft> is given,
        # as in the original code.
        sValue = sValue[nLeft:nRight]
    dToken["lMorph"] = _oSpellChecker.getMorph(sValue)
    return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
|
|
|
>
>
>
>
>
>
>
>
|
|
>
>
>
>
>
>
|
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
|
dTokenPos[nPos]["lMorph"] = lMorph
return True
#### Disambiguation for graph rules
def g_select (dToken, sPattern, lDefault=None):
    "Disambiguation: keep only the morphologies of <dToken> matching <sPattern>; always return True"
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if len(lMorph) > 1:
        # keep the matching morphologies…
        lKept = []
        for sMorph in lMorph:
            if re.search(sPattern, sMorph):
                lKept.append(sMorph)
        if not lKept:
            # …nothing matched: fall back on <lDefault> when given
            if lDefault:
                dToken["lMorph"] = lDefault
        elif len(lKept) < len(lMorph):
            dToken["lMorph"] = lKept
    elif lDefault:
        # zero or one morphology: nothing to narrow, just apply the default
        dToken["lMorph"] = lDefault
    return True
def g_exclude (dToken, sPattern, lDefault=None):
    "Disambiguation: discard the morphologies of <dToken> matching <sPattern>, always return True"
    # Docstring fixed: it was copy-pasted from g_select() and claimed to
    # *select* matching morphologies, while this function removes them.
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if not lMorph or len(lMorph) == 1:
        # zero or one morphology: nothing to narrow, just apply the default
        if lDefault:
            dToken["lMorph"] = lDefault
        #echo("DA:", dToken["sValue"], dToken["lMorph"])
        return True
    lSelect = [ sMorph for sMorph in lMorph if not re.search(sPattern, sMorph) ]
    if lSelect:
        if len(lSelect) != len(lMorph):
            dToken["lMorph"] = lSelect
    elif lDefault:
        # every morphology was excluded: fall back on <lDefault>
        dToken["lMorph"] = lDefault
    #echo("DA:", dToken["sValue"], dToken["lMorph"])
    return True
def g_add_morph (dToken, lNewMorph):
    "Disambiguation: append the morphologies <lNewMorph> to those of the token; always return True"
    if "lMorph" in dToken:
        lMorph = dToken["lMorph"]
    else:
        lMorph = _oSpellChecker.getMorph(dToken["sValue"])
    # extend in place (the token may already reference this very list)
    lMorph.extend(lNewMorph)
    dToken["lMorph"] = lMorph
    return True
def g_define (dToken, lMorph):
    "Disambiguation: set morphologies of <dToken>, always return True"
    # Unconditionally overwrite the token's morphology list with <lMorph>.
    dToken["lMorph"] = lMorph
    #echo("DA:", dToken["sValue"], lMorph)
    return True
def g_define_from (dToken, nLeft=None, nRight=None):
    "Disambiguation: set morphologies of <dToken> from its value sliced with <nLeft> and <nRight>"
    # slice(None, None) is the whole string, so a single code path covers
    # every combination of arguments. This also makes <nRight> effective
    # when <nLeft> is omitted — previously it was silently ignored in that
    # case, contradicting the docstring.
    dToken["lMorph"] = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)])
    return True
def g_change_meta (dToken, sType):
    "Disambiguation: change type of token"
    # Overwrite the token's "sType" field in place.
    dToken["sType"] = sType
    return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
|
︙ | | | ︙ | |