Overview

| Comment: | [core] gc engine: lazy loading for graph rules |
|---|---|
| Downloads: | Tarball, ZIP archive, SQL archive |
| Timelines: | family, ancestors, descendants, both, core, rg |
| Files: | files, file ages, folders |
| SHA3-256: | f648075faaeb87d448d6d1fe5ed8cf23 |
| User & Date: | olr on 2018-09-05 17:51:49 |
| Other Links: | branch diff, manifest, tags |
Context

2018-09-05

| Time | Comment | Check-in | User | Tags |
|---|---|---|---|---|
| 18:47 | [fr][bug] extra parameters | 1286ea415b | olr | fr, rg |
| 17:51 | [core] gc engine: lazy loading for graph rules | f648075faa | olr | core, rg |
| 16:28 | [core] gc engine: code refactoring | b3dac7c19b | olr | core, rg |
Changes
Modified gc_core/py/lang_core/gc_engine.py from [790644a22e] to [dd21ce6816].
Old lines 11-17 → new lines 11-24 (new version shown):

from itertools import chain

from ..graphspell.spellchecker import SpellChecker
from ..graphspell.tokenizer import Tokenizer
from ..graphspell.echo import echo
from . import gc_options

try:
    # LibreOffice / OpenOffice
    from com.sun.star.linguistic2 import SingleProofreadingError
    from com.sun.star.text.TextMarkupType import PROOFREADING
    from com.sun.star.beans import PropertyValue
    #import lightproof_handler_${implname} as opt
    _bWriterError = True
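The try block above is how the engine detects its host: if the UNO classes import successfully it is running inside LibreOffice/OpenOffice and sets `_bWriterError`; the matching except branch lies below the hunk cutoff. A minimal sketch of that optional-import pattern follows, with a hypothetical `makeError` helper; it is not the engine's actual fallback code.

```python
# Sketch of the optional-import pattern, not Grammalecte's actual fallback code.
try:
    # Available only when running inside LibreOffice / OpenOffice (UNO bridge).
    from com.sun.star.linguistic2 import SingleProofreadingError
    _bWriterError = True                      # build UNO error structs
except ImportError:
    SingleProofreadingError = None
    _bWriterError = False                     # build plain dict errors instead

def makeError (nStart, nEnd, sMessage):
    "Hypothetical helper: return an error object suited to the host application."
    if _bWriterError:
        xErr = SingleProofreadingError()
        xErr.nErrorStart = nStart
        xErr.nErrorLength = nEnd - nStart
        xErr.aShortComment = sMessage
        return xErr
    return {"nStart": nStart, "nEnd": nEnd, "sMessage": sMessage}
```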
Old lines 41-48 → new lines 39-55 (new version shown):

pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"

# Modules
_rules = None                               # module gc_rules
_rules_graph = None                         # module gc_rules_graph

# Data
_sAppContext = ""                           # what software is running
_dOptions = None
_oSpellChecker = None
_oTokenizer = None
_aIgnoredRules = set()
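This hunk adds `_rules_graph` next to `_rules` as a module-level placeholder: both stay `None` until `_loadRules()` fills them, which is what the check-in's "lazy loading for graph rules" amounts to. Below is a small self-contained sketch of that placeholder-plus-loader idiom; the module name passed in is a stand-in, not part of the engine.

```python
import importlib

_rules_graph = None      # stays None until the first check actually needs the graphs

def _loadGraphRules (sModuleName):
    "Import the graph-rule module on first use only (sModuleName is a stand-in)."
    global _rules_graph
    if _rules_graph is None:
        _rules_graph = importlib.import_module(sModuleName)
    return _rules_graph
```

Nothing is imported at start-up; the cost of loading the generated graph-rule module is paid on the first call instead.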
Old lines 83-98 → new lines 82-100 (new version shown):

    if not bParagraph:
        return _rules.lSentenceRules
    return _rules.lParagraphRules


def _loadRules ():
    from . import gc_rules
    from . import gc_rules_graph
    global _rules
    global _rules_graph
    _rules = gc_rules
    _rules_graph = gc_rules_graph
    # compile rules regex
    for sOption, lRuleGroup in chain(_rules.lParagraphRules, _rules.lSentenceRules):
        if sOption != "@@@@":
            for aRule in lRuleGroup:
                try:
                    aRule[0] = re.compile(aRule[0])
                except:
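`_loadRules()` now imports both rule modules and publishes them through the module globals, then pre-compiles every regex rule while skipping the groups tagged `"@@@@"`, since those hold graph references rather than patterns. The toy version below, with invented rule data shaped like the engine's `(sOption, lRuleGroup)` pairs, shows why that sentinel check matters.

```python
import re
from itertools import chain

# Invented rule data in the same shape the loop above iterates:
# graph-rule groups are tagged "@@@@" and their entries are not regex patterns.
lParagraphRules = [
    ("typo", [["\\bteh\\b", False, "#10", "rule_teh", 4, []]]),
    ("@@@@", [("some_graph", "#20")]),
]
lSentenceRules = [
    ("conf", [["\\b(there|their)\\b", False, "#30", "rule_their", 4, []]]),
]

def compileRegexRules ():
    "Compile regex rules in place; leave graph-rule groups untouched."
    for sOption, lRuleGroup in chain(lParagraphRules, lSentenceRules):
        if sOption != "@@@@":
            for aRule in lRuleGroup:
                try:
                    aRule[0] = re.compile(aRule[0])
                except re.error as e:
                    print("bad regex in rule", aRule[3], ":", e)

compileRegexRules()
print(type(lParagraphRules[0][1][0][0]))    # <class 're.Pattern'>
print(lParagraphRules[1][1][0])             # graph entry left untouched
```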
Old lines 280-286 → new lines 282-296 (new version shown):

                if not bParagraph and bChange:
                    self.update(sText, bDebug)
                    bChange = False
                for sGraphName, sLineId in lRuleGroup:
                    if sGraphName not in dOptions or dOptions[sGraphName]:
                        if bDebug:
                            print("\n>>>> GRAPH:", sGraphName, sLineId)
                        sText = self.parseGraph(_rules_graph.dAllGraph[sGraphName], sCountry, dOptions, bShowRuleId, bDebug, bContext)
            elif not sOption or dOptions.get(sOption, False):
                # regex rules
                for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup:
                    if sRuleId not in _aIgnoredRules:
                        for m in zRegex.finditer(sText):
                            bCondMemo = None
                            for sFuncCond, cActionType, sWhat, *eAct in lActions:
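The loop above dispatches on the `"@@@@"` sentinel: graph-rule groups are handed to `parseGraph` with the graph looked up in the lazily loaded `_rules_graph.dAllGraph`, while ordinary groups run their pre-compiled regexes over the text. The stripped-down sketch below mirrors that dispatch; the data, the option handling and `parseGraphStub` are invented stand-ins, not the engine's real API.

```python
import re

dAllGraph = {"demo_graph": {"<start>": []}}            # stand-in for _rules_graph.dAllGraph
lRules = [
    ("@@@@", [("demo_graph", "#42")]),                 # graph rules: (graph name, line id)
    ("typo", [(re.compile(r"\bteh\b"), "rule_teh")]),  # regex rules: (compiled pattern, rule id)
]

def parseGraphStub (dGraph, sText):
    "Stand-in for parseGraph: report the graph and return the text unchanged."
    print("walking graph with", len(dGraph), "node(s)")
    return sText

def checkText (sText, dOptions):
    for sOption, lRuleGroup in lRules:
        if sOption == "@@@@":
            for sGraphName, sLineId in lRuleGroup:
                # same logic as: sGraphName not in dOptions or dOptions[sGraphName]
                if dOptions.get(sGraphName, True):
                    sText = parseGraphStub(dAllGraph[sGraphName], sText)
        elif not sOption or dOptions.get(sOption, False):
            for zRegex, sRuleId in lRuleGroup:
                for m in zRegex.finditer(sText):
                    print("regex hit:", sRuleId, "at offset", m.start())
    return sText

checkText("I saw teh cat.", {"typo": True})
```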
Old lines 503-509 → new lines 505-519 (new version shown):

        bChange = False
        for sLineId, nextNodeKey in dNode.items():
            bCondMemo = None
            for sRuleId in dGraph[nextNodeKey]:
                try:
                    if bDebug:
                        print(" >TRY:", sRuleId)
                    sOption, sFuncCond, cActionType, sWhat, *eAct = _rules_graph.dRule[sRuleId]
                    # Suggestion    [ option, condition, "-", replacement/suggestion/action, iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, sURL ]
                    # TextProcessor [ option, condition, "~", replacement/suggestion/action, iTokenStart, iTokenEnd, bCaseSvty ]
                    # Disambiguator [ option, condition, "=", replacement/suggestion/action ]
                    # Tag           [ option, condition, "/", replacement/suggestion/action, iTokenStart, iTokenEnd ]
                    # Immunity      [ option, condition, "%", "", iTokenStart, iTokenEnd ]
                    # Test          [ option, condition, ">", "" ]
                    if not sOption or dOptions.get(sOption, False):
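Each graph rule sits in `_rules_graph.dRule` as a flat list whose third field is a one-character action code (documented in the comments above), and the leading fields are pulled out with a starred assignment. A short sketch of that unpack-and-label step, using invented rule records in the same layout:

```python
# Invented rule records following the commented layout above:
# [ option, condition, actionType, what, *extra fields ]
dRule = {
    "g1_100": ["typo", "checkAgreement", "-", "suggested text", 1, 2, "_", "_", True, 4, "Possible typo.", ""],
    "g1_101": ["", "", "/", "neg", 1, 1],
}

dActionNames = {"-": "suggestion", "~": "text processor", "=": "disambiguator",
                "/": "tag", "%": "immunity", ">": "test"}

def describeRule (sRuleId):
    "Unpack a rule the same way the engine does and name its action type."
    sOption, sFuncCond, cActionType, sWhat, *eAct = dRule[sRuleId]
    print(sRuleId, "->", dActionNames.get(cActionType, "unknown"),
          "| option:", sOption or "(none)", "| extra fields:", eAct)

describeRule("g1_100")
describeRule("g1_101")
```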
Old lines 565-571 → new lines 567-581 (new version shown):

                                    self.dTags[sWhat] = [nTokenStart, nTokenStart]
                                else:
                                    self.dTags[sWhat][0] = min(nTokenStart, self.dTags[sWhat][0])
                                    self.dTags[sWhat][1] = max(nTokenEnd, self.dTags[sWhat][1])
                            elif cActionType == "%":
                                # immunity
                                if bDebug:
                                    print(" IMMUNITY:\n ", _rules_graph.dRule[sRuleId])
                                nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0]
                                nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
                                if nTokenEnd - nTokenStart == 0:
                                    self.lToken[nTokenStart]["bImmune"] = True
                                    nErrorStart = self.nOffsetWithinParagraph + self.lToken[nTokenStart]["nStart"]
                                    if nErrorStart in self.dError:
                                        del self.dError[nErrorStart]
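In the immunity branch, `eAct[0]` and `eAct[1]` pick the first and last token of the protected span: a positive value is an offset from `nTokenOffset`, while zero or a negative value counts back from `nLastToken`. The tiny example below isolates that index arithmetic with made-up offsets.

```python
def resolveTokenSpan (eAct, nTokenOffset, nLastToken):
    "Turn a rule's relative token indices into absolute token positions."
    nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0]
    nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
    return nTokenStart, nTokenEnd

# Made-up numbers: nTokenOffset=10, nLastToken=15.
print(resolveTokenSpan((1, 2), 10, 15))    # (11, 12): both offsets counted forward
print(resolveTokenSpan((1, 0), 10, 15))    # (11, 15): 0 means the last token
print(resolveTokenSpan((-1, 0), 10, 15))   # (14, 15): negative values count backward
```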