Overview
| Comment: | [core] gc engine: merge error creation functions (code clarification) |
|---|---|
| SHA3-256: | 6f2fcade8d9effdf81c28d8c5a9528be |
| User & Date: | olr on 2018-06-27 20:18:09 |
Context
2018-06-27

| 20:53 | [core] gc engine: create errors, code clarification | check-in: f086eb66a0 | user: olr | tags: core, rg |
| 20:18 | [core] gc engine: merge error creation functions (code clarification) | check-in: 6f2fcade8d | user: olr | tags: core, rg |
| 18:58 | [core][revert] gc engine: don’t remove merged tokens | check-in: 82a84a50e8 | user: olr | tags: core, rg |
Changes
Modified gc_core/py/lang_core/gc_engine.py from [7a8dc9f99f] to [6d8fead8fe].
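Both of the changed call sites below now go through a single `_createError` builder whose behaviour is selected by `_bWriterError`. As a reading aid, here is a minimal sketch of that pattern, assuming a standalone (non-Writer) run; `create_error` and its arguments are illustrative names, not the project's API:

    # Illustrative sketch of the merged-builder pattern shown in the diff below.
    # Only _bWriterError and the dict keys mirror the real code; everything else
    # is a hypothetical stand-in.
    _bWriterError = False   # True only inside LibreOffice Writer (UNO available)

    def create_error(nStart, nEnd, sRuleId, sMessage, lSugg):
        "Single entry point replacing two near-identical creation functions."
        if _bWriterError:
            # Writer branch: the real code fills a SingleProofreadingError struct here.
            raise NotImplementedError("UNO structs only exist inside LibreOffice")
        # Standalone branch: a plain dict, shaped like the one built in the diff.
        return {"nStart": nStart, "nEnd": nEnd, "sRuleId": sRuleId,
                "sMessage": sMessage, "aSuggestions": lSugg, "URL": ""}

    print(create_error(0, 4, "my_rule", "Possible error.", ["suggestion"]))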
︙
# data
_sAppContext = "" # what software is running
_dOptions = None
_oSpellChecker = None
_oTokenizer = None
_aIgnoredRules = set()
#### Initialization
def load (sContext="Python"):
"initialization of the grammar checker"
global _oSpellChecker
global _sAppContext
global _dOptions
global _oTokenizer
try:
_oSpellChecker = SpellChecker("${lang}", "${dic_main_filename_py}", "${dic_extended_filename_py}", "${dic_community_filename_py}", "${dic_personal_filename_py}")
_sAppContext = sContext
_dOptions = dict(gc_options.getOptions(sContext)) # duplication necessary, to be able to reset to default
_oTokenizer = _oSpellChecker.getTokenizer()
_oSpellChecker.activateStorage()
except:
traceback.print_exc()
def _getRules (bParagraph):
try:
if not bParagraph:
︙
try:
bCondMemo = not sFuncCond or globals()[sFuncCond](s, sx, m, dTokenPos, sCountry, bCondMemo)
if bCondMemo:
if cActionType == "-":
# grammar error
nErrorStart = nOffset + m.start(eAct[0])
if nErrorStart not in dErrs or nPriority > dPriority.get(nErrorStart, -1):
dErrs[nErrorStart] = _createError(s, sx, sWhat, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bShowRuleId, sOption, bContext)
dPriority[nErrorStart] = nPriority
elif cActionType == "~":
# text processor
s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
bParagraphChange = True
bSentenceChange = True
if bDebug:
︙
except Exception as e:
raise Exception(str(e), "# " + sLineId + " # " + sRuleId)
if bParagraphChange:
return (s, dErrs)
return (False, dErrs)
def _createError (s, sx, sRepl, nOffset, m, iGroup, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
nStart = nOffset + m.start(iGroup)
nEnd = nOffset + m.end(iGroup)
# suggestions
if sRepl[0:1] == "=":
sugg = globals()[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
lSugg = list(map(str.capitalize, sugg.split("|")))
else:
lSugg = sugg.split("|")
else:
lSugg = []
elif sRepl == "_":
lSugg = []
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
lSugg = list(map(str.capitalize, m.expand(sRepl).split("|")))
else:
lSugg = m.expand(sRepl).split("|")
# Message
sMessage = globals()[sMsg[1:]](s, m) if sMsg[0:1] == "=" else m.expand(sMsg)
if bShowRuleId:
sMessage += " # " + sLineId + " # " + sRuleId
#
if _bWriterError:
xErr = SingleProofreadingError() # uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
xErr.nErrorStart = nStart
xErr.nErrorLength = nEnd - nStart
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sRuleId
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
if bShowRuleId:
xErr.aShortComment += " " + sLineId + " # " + sRuleId
xErr.aSuggestions = tuple(lSugg)
if sURL:
xProperty = PropertyValue()
xProperty.Name = "FullCommentURL"
xProperty.Value = sURL
xErr.aProperties = (xProperty,)
else:
xErr.aProperties = ()
return xErr
else:
dErr = {}
dErr["nStart"] = nStart
dErr["nEnd"] = nEnd
dErr["sLineId"] = sLineId
dErr["sRuleId"] = sRuleId
dErr["sType"] = sOption if sOption else "notype"
dErr["sMessage"] = sMessage
dErr["aSuggestions"] = lSugg
dErr["URL"] = sURL if sURL else ""
if bContext:
dErr['sUnderlined'] = self.sSentence0[nStart:nEnd]
dErr['sBefore'] = self.sSentence0[max(0,nStart-80):nStart]
dErr['sAfter'] = self.sSentence0[nEnd:nEnd+80]
return dErr
def _rewrite (sSentence, sRepl, iGroup, m, bUppercase):
"text processor: write <sRepl> in <sSentence> at <iGroup> position"
nLen = m.end(iGroup) - m.start(iGroup)
if sRepl == "*":
sNew = " " * nLen
︙
self.sSentence = sSentence
self.sSentence0 = sSentence0
self.nOffsetWithinParagraph = nOffset
self.lToken = list(_oTokenizer.genTokens(sSentence, True))
self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lToken }
self.dTags = {}
self.dError = {}
def update (self, sSentence):
"update <sSentence> and retokenize"
self.sSentence = sSentence
self.lToken = list(_oTokenizer.genTokens(sSentence, True))
def _getNextMatchingNodes (self, dToken, dGraph, dNode, bDebug=False):
︙
# grammar error
nTokenErrorStart = nTokenOffset + eAct[0]
if "bImmune" not in self.lToken[nTokenErrorStart]:
nTokenErrorEnd = (nTokenOffset + eAct[1]) if eAct[1] else nLastToken
nErrorStart = self.nOffsetWithinParagraph + self.lToken[nTokenErrorStart]["nStart"]
nErrorEnd = self.nOffsetWithinParagraph + self.lToken[nTokenErrorEnd]["nEnd"]
if nErrorStart not in dError or eAct[2] > dPriority.get(nErrorStart, -1):
dError[nErrorStart] = self._createError(sWhat, nTokenOffset, nTokenErrorStart, nErrorStart, nErrorEnd, sLineId, sRuleId, True, eAct[3], eAct[4], bShowRuleId, "notype", bContext)
dPriority[nErrorStart] = eAct[2]
if bDebug:
print(" NEW_ERROR:", dError[nErrorStart], "\n ", dRule[sRuleId])
elif cActionType == "~":
# text processor
if bDebug:
print(" TAG_PREPARE:\n ", dRule[sRuleId])
︙
if bDebug:
print(" COND_BREAK")
break
except Exception as e:
raise Exception(str(e), sLineId, sRuleId, self.sSentence)
return bChange, dError
def _createError (self, sSugg, nTokenOffset, iFirstToken, nStart, nEnd, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
# suggestions
if sSugg[0:1] == "=":
sSugg = globals()[sSugg[1:]](self.lToken, nTokenOffset)
if sSugg:
if bUppercase and self.lToken[iFirstToken]["sValue"][0:1].isupper():
lSugg = list(map(str.capitalize, sSugg.split("|")))
else:
lSugg = sSugg.split("|")
else:
lSugg = []
elif sSugg == "_":
lSugg = []
else:
if bUppercase and self.lToken[iFirstToken]["sValue"][0:1].isupper():
lSugg = list(map(str.capitalize, self._expand(sSugg, nTokenOffset).split("|")))
else:
lSugg = self._expand(sSugg, nTokenOffset).split("|")
# Message
sMessage = globals()[sMsg[1:]](self.lToken) if sMsg[0:1] == "=" else self._expand(sMsg, nTokenOffset)
if bShowRuleId:
sMessage += " " + sLineId + " # " + sRuleId
#
if _bWriterError:
xErr = SingleProofreadingError() # uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
xErr.nErrorStart = nStart
xErr.nErrorLength = nEnd - nStart
xErr.nErrorType = PROOFREADING
xErr.aRuleIdentifier = sRuleId
xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
if bShowRuleId:
xErr.aShortComment += " " + sLineId + " # " + sRuleId
xErr.aSuggestions = tuple(lSugg)
if sURL:
xProperty = PropertyValue()
xProperty.Name = "FullCommentURL"
xProperty.Value = sURL
xErr.aProperties = (xProperty,)
else:
xErr.aProperties = ()
return xErr
else:
dErr = {}
dErr["nStart"] = nStart
dErr["nEnd"] = nEnd
dErr["sLineId"] = sLineId
dErr["sRuleId"] = sRuleId
dErr["sType"] = sOption if sOption else "notype"
dErr["sMessage"] = sMessage
dErr["aSuggestions"] = lSugg
dErr["URL"] = sURL if sURL else ""
if bContext:
dErr['sUnderlined'] = self.sSentence0[nStart:nEnd]
dErr['sBefore'] = self.sSentence0[max(0,nStart-80):nStart]
dErr['sAfter'] = self.sSentence0[nEnd:nEnd+80]
return dErr
def _expand (self, sText, nTokenOffset):
#print("*", sText)
for m in re.finditer(r"\\([0-9]+)", sText):
sText = sText.replace(m.group(0), self.lToken[int(m.group(1))+nTokenOffset]["sValue"])
#print(">", sText)
return sText
︙
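Outside Writer, callers of the merged `_createError` receive the dict form built above. A hedged usage sketch follows, assuming such a dict is already in hand; `format_error` is a hypothetical helper, and only the dict keys come from the diff:

    # Hedged usage sketch: rendering dict-style errors from the merged _createError
    # (non-Writer branch). The keys mirror the diff above; the helper itself is
    # a hypothetical illustration, not part of gc_engine.py.
    def format_error(dErr):
        "Return a one-line, human-readable rendering of one grammar error dict."
        sLine = "[{}-{}] {} ({})".format(dErr["nStart"], dErr["nEnd"], dErr["sMessage"], dErr["sType"])
        if dErr["aSuggestions"]:
            sLine += " -> " + " | ".join(dErr["aSuggestions"])
        if "sUnderlined" in dErr:   # only present when bContext was True
            sLine += "\n    ..." + dErr["sBefore"] + "[" + dErr["sUnderlined"] + "]" + dErr["sAfter"] + "..."
        return sLine

    # Hand-built example shaped like the dict returned in the diff:
    dErr = {"nStart": 10, "nEnd": 14, "sLineId": "#1234", "sRuleId": "my_rule",
            "sType": "notype", "sMessage": "Possible agreement error.",
            "aSuggestions": ["word"], "URL": ""}
    print(format_error(dErr))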