Index: compile_rules_graph.py
==================================================================
--- compile_rules_graph.py
+++ compile_rules_graph.py
@@ -320,13 +320,13 @@
     #sJSCallables = "// generated code, do not edit\nconst oEvalFunc = {\n"
     for sFuncName, sReturn in lFUNCTIONS:
         if sFuncName.startswith("g_c_"): # condition
             sParams = "lToken, nTokenOffset, sCountry, bCondMemo"
         elif sFuncName.startswith("g_m_"): # message
-            sParams = "lToken"
+            sParams = "lToken, nTokenOffset"
         elif sFuncName.startswith("g_s_"): # suggestion
-            sParams = "lToken"
+            sParams = "lToken, nTokenOffset"
         elif sFuncName.startswith("g_p_"): # preprocessor
             sParams = "lToken"
         elif sFuncName.startswith("g_d_"): # disambiguator
             sParams = "lToken, nTokenOffset"
         else:
Index: gc_core/py/lang_core/gc_engine.py
==================================================================
--- gc_core/py/lang_core/gc_engine.py
+++ gc_core/py/lang_core/gc_engine.py
@@ -623,11 +623,10 @@
             if sNegPattern and any(re.search(sNegPattern, sMorph) for sMorph in _oSpellChecker.getMorph(dToken["sValue"])):
                 continue
             if any(re.search(sPattern, sMorph) for sMorph in _oSpellChecker.getMorph(dToken["sValue"])):
                 yield dGraph[dNode[""][sRegex]]
 
-
     def parse (self, dPriority, sCountry="${country_default}", dOptions=None, bShowRuleId=False, bDebug=False, bContext=False):
         dErr = {}
         dPriority = {}  # Key = position; value = priority
         dOpt = _dOptions if not dOptions else dOptions
         lPointer = []
@@ -672,11 +671,11 @@
                             nTokenErrorStart = nTokenOffset + eAct[0]
                             nTokenErrorEnd = nTokenOffset + eAct[1]
                             nErrorStart = self.nOffset + self.lToken[nTokenErrorStart]["nStart"]
                             nErrorEnd = self.nOffset + self.lToken[nTokenErrorEnd]["nEnd"]
                             if nErrorStart not in dErrs or eAct[2] > dPriority[nErrorStart]:
-                                dErrs[nErrorStart] = self.createError(sWhat, nTokenErrorStart, nErrorStart, nErrorEnd, sLineId, sRuleId, True, eAct[3], eAct[4], bShowRuleId, "notype", bContext)
+                                dErrs[nErrorStart] = self.createError(sWhat, nTokenOffset, nTokenErrorStart, nErrorStart, nErrorEnd, sLineId, sRuleId, True, eAct[3], eAct[4], bShowRuleId, "notype", bContext)
                                 dPriority[nErrorStart] = eAct[2]
                         elif cActionType == "~":
                             # text processor
                             print("~")
                             self._tagAndPrepareTokenForRewriting(sWhat, nTokenOffset + eAct[0], nTokenOffset + eAct[1])
@@ -695,37 +694,37 @@
                             break
                 except Exception as e:
                     raise Exception(str(e), sLineId)
         return bChange, dErrs
 
-    def _createWriterError (self, sRepl, iFirstToken, nStart, nEnd, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
+    def _createWriterError (self, sSugg, nTokenOffset, iFirstToken, nStart, nEnd, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
         "error for Writer (LO/OO)"
         xErr = SingleProofreadingError()
         #xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
         xErr.nErrorStart = nStart
         xErr.nErrorLength = nEnd - nStart
         xErr.nErrorType = PROOFREADING
         xErr.aRuleIdentifier = sRuleId
         # suggestions
-        if sRepl[0:1] == "=":
-            sSugg = globals()[sRepl[1:]](self.lToken)
+        if sSugg[0:1] == "=":
+            sSugg = globals()[sSugg[1:]](self.lToken)
             if sSugg:
                 if bUppercase and self.lToken[iFirstToken]["sValue"][0:1].isupper():
                     xErr.aSuggestions = tuple(map(str.capitalize, sSugg.split("|")))
                 else:
                     xErr.aSuggestions = tuple(sSugg.split("|"))
             else:
                 xErr.aSuggestions = ()
-        elif sRepl == "_":
+        elif sSugg == "_":
             xErr.aSuggestions = ()
         else:
             if bUppercase and self.lToken[iFirstToken]["sValue"][0:1].isupper():
-                xErr.aSuggestions = tuple(map(str.capitalize, sRepl.split("|")))
+                xErr.aSuggestions = tuple(map(str.capitalize, self._expand(sSugg, nTokenOffset).split("|")))
             else:
-                xErr.aSuggestions = tuple(sRepl.split("|"))
+                xErr.aSuggestions = tuple(self._expand(sSugg, nTokenOffset).split("|"))
         # Message
-        sMessage = globals()[sMsg[1:]](self.lToken) if sMsg[0:1] == "=" else sMsg
+        sMessage = globals()[sMsg[1:]](self.lToken) if sMsg[0:1] == "=" else self._expand(sMsg, nTokenOffset)
         xErr.aShortComment = sMessage   # sMessage.split("|")[0]  # in context menu
         xErr.aFullComment = sMessage    # sMessage.split("|")[-1] # in dialog
         if bShowRuleId:
             xErr.aShortComment += " " + sLineId + " # " + sRuleId
         # URL
@@ -736,37 +735,37 @@
             xErr.aProperties = (p,)
         else:
             xErr.aProperties = ()
         return xErr
 
-    def _createDictError (self, sRepl, iFirstToken, nStart, nEnd, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
+    def _createDictError (self, sSugg, nTokenOffset, iFirstToken, nStart, nEnd, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
         "error as a dictionary"
         dErr = {}
         dErr["nStart"] = nStart
         dErr["nEnd"] = nEnd
         dErr["sLineId"] = sLineId
         dErr["sRuleId"] = sRuleId
         dErr["sType"] = sOption if sOption else "notype"
         # suggestions
-        if sRepl[0:1] == "=":
-            sugg = globals()[sRepl[1:]](self.lToken)
-            if sugg:
+        if sSugg[0:1] == "=":
+            sSugg = globals()[sSugg[1:]](self.lToken)
+            if sSugg:
                 if bUppercase and self.lToken[iFirstToken]["sValue"][0:1].isupper():
-                    dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
+                    dErr["aSuggestions"] = list(map(str.capitalize, sSugg.split("|")))
                 else:
-                    dErr["aSuggestions"] = sugg.split("|")
+                    dErr["aSuggestions"] = sSugg.split("|")
             else:
                 dErr["aSuggestions"] = []
-        elif sRepl == "_":
+        elif sSugg == "_":
             dErr["aSuggestions"] = []
         else:
             if bUppercase and self.lToken[iFirstToken]["sValue"][0:1].isupper():
-                dErr["aSuggestions"] = list(map(str.capitalize, sRepl.split("|")))
+                dErr["aSuggestions"] = list(map(str.capitalize, self._expand(sSugg, nTokenOffset).split("|")))
             else:
-                dErr["aSuggestions"] = sRepl.split("|")
+                dErr["aSuggestions"] = self._expand(sSugg, nTokenOffset).split("|")
         # Message
-        dErr["sMessage"] = globals()[sMsg[1:]](self.lToken) if sMsg[0:1] == "=" else sMsg
+        dErr["sMessage"] = globals()[sMsg[1:]](self.lToken) if sMsg[0:1] == "=" else self._expand(sMsg, nTokenOffset)
         if bShowRuleId:
             dErr["sMessage"] += " " + sLineId + " # " + sRuleId
         # URL
         dErr["URL"] = sURL if sURL else ""
         # Context
@@ -773,10 +772,17 @@
         if bContext:
             dErr['sUnderlined'] = self.sSentence0[dErr["nStart"]:dErr["nEnd"]]
             dErr['sBefore'] = self.sSentence0[max(0,dErr["nStart"]-80):dErr["nStart"]]
             dErr['sAfter'] = self.sSentence0[dErr["nEnd"]:dErr["nEnd"]+80]
         return dErr
+
+    def _expand (self, sMsg, nTokenOffset):
+        print(sMsg)
+        for m in re.finditer(r"\\([0-9]+)", sMsg):
+            sMsg = sMsg.replace(m.group(0), self.lToken[int(m.group(1))+nTokenOffset]["sValue"])
+        print(sMsg)
+        return sMsg
 
     def _tagAndPrepareTokenForRewriting (self, sWhat, nTokenRewriteStart, nTokenRewriteEnd, bUppercase=True):
         "text processor: rewrite tokens between <nTokenRewriteStart> and <nTokenRewriteEnd> position"
         if sWhat == "*":
             # purge text
Index: gc_lang/fr/rules_graph.grx
==================================================================
--- gc_lang/fr/rules_graph.grx
+++ gc_lang/fr/rules_graph.grx
@@ -73,20 +73,20 @@
 
 TEST: il ne pense qu’à sa gueule.
 
 
 __avoir_confiance_en__
-    >avoir confiance (dans) [moi|toi|soi|lui|elle|nous|vous|eux|elles]
-        <<- -1>> en                                         # Avoir confiance en quelqu’un ou quelque chose.|http://grammalecte.net
+    >avoir confiance dans [moi|toi|soi|lui|elle|nous|vous|eux|elles]
+        <<- -3>> en                                         # Avoir confiance en quelqu’un ou quelque chose.\3 \1 \2 \3|http://grammalecte.net
 
 TEST: Elle avait confiance {{dans}} lui.
 
 
 __code_legacy__
     legacy code
     code legacy
-        <<- -1:2>> code hérité|code reliquat                # Anglicisme superflu.
+        <<- -1:2>> code hérité|code reliquat|\1-\2|\2-\1    # \1 \2. Anglicisme superflu.
 
 TEST: c’est du {{legacy code}}.
 TEST: ce {{code legacy}} est un cauchemar
@@ -104,9 +104,9 @@
 
 TEST: Ça me fait {{plaisirs}}.
 
 
 __test__
-    je ~préf[éè]r [que|qu’] @(?::Os|:M)¬:X @:I
-        <<- morph(\1, ":V") and morph(\4, ":Os|:M", ":X") -5>> SUBJONCTIF    # SUBJONCTIF.
+    je ~co[mn]putes? [que|qu’] @(?::Os|:M)¬:X @:I
+        <<- morph(\4, ":Os|:M", ":X") -5>> \1|\5                             # SUBJONCTIF.
 
-TEST: je préférerais qu’Isabelle {{est}} partie.
+TEST: je conpute qu’Isabelle {{est}} partie.
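
The engine-side core of this patch is the new _expand() helper: the compiled message (g_m_) and suggestion (g_s_) callables are now generated with an nTokenOffset parameter in addition to lToken, and plain-text messages and suggestions may carry \N back-references that _expand() replaces with the value of token N of the matched rule, taken from self.lToken[nTokenOffset + N]["sValue"]. That is what the \1, \2 and \3 markers added to rules_graph.grx exercise. Below is a minimal standalone sketch of that substitution, using a hand-built token list rather than the engine's real tokenizer output; the "<start>" token and the nTokenOffset value of 0 are simplified stand-ins chosen for illustration.

import re

def expand(sMsg, lToken, nTokenOffset):
    # Replace each \N back-reference in sMsg with the value of token N of the
    # matched rule, i.e. lToken[nTokenOffset + N]["sValue"], mirroring _expand().
    for m in re.finditer(r"\\([0-9]+)", sMsg):
        sMsg = sMsg.replace(m.group(0), lToken[int(m.group(1)) + nTokenOffset]["sValue"])
    return sMsg

# Hypothetical token list: a sentence-start token followed by "code" and "legacy";
# the rule match begins right after token 0, so nTokenOffset is 0 here.
lToken = [{"sValue": "<start>"}, {"sValue": "code"}, {"sValue": "legacy"}]
print(expand("\\1 \\2. Anglicisme superflu.", lToken, 0))
# prints: code legacy. Anglicisme superflu.

With the __code_legacy__ rule above, this is the expansion that turns the message "\1 \2. Anglicisme superflu." and the suggestions "\1-\2|\2-\1" into "code legacy. Anglicisme superflu." and "code-legacy|legacy-code" for the matched text.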