Grammalecte Diff

Differences From Artifact [d26ce3d16f]:

To Artifact [5c017c54f1]:


166
167
168
169
170
171
172
173

174
175
176
177
178
179
180
181
182
166
167
168
169
170
171
172

173


174
175
176
177
178
179
180







-
+
-
-







                bSentenceChange = False
            for sGraphName, sLineId in lRuleGroup:
                if bDebug:
                    print("\n>>>> GRAPH:", sGraphName, sLineId)
                bParagraphChange, errs = oSentence.parse(dAllGraph[sGraphName], dPriority, sCountry, dOptions, bShowRuleId, bDebug, bContext)
                dErrs.update(errs)
                if bParagraphChange:
                    s = oSentence.rewrite()
                    s = oSentence.rewrite(bDebug)
                    if bDebug:
                        print("~", oSentence.sSentence)
        elif not sOption or dOptions.get(sOption, False):
            # regex rules
            for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup:
                if sRuleId not in _aIgnoredRules:
                    for m in zRegex.finditer(s):
                        bCondMemo = None
                        for sFuncCond, cActionType, sWhat, *eAct in lActions:
847
848
849
850
851
852
853
854

855
856
857
858
859
860
861




862
863
864
865

866

867
868
869
870
871

872
873
874
875
876
877
878
845
846
847
848
849
850
851

852
853
854
855
856
857


858
859
860
861
862
863
864
865
866

867
868
869
870
871

872
873
874
875
876
877
878
879







-
+





-
-
+
+
+
+




+
-
+




-
+







                    print("Error. Text processor: number of replacements != number of tokens.")
                    return
                for i, sValue in zip(range(nTokenRewriteStart, nTokenRewriteEnd+1), lTokenValue):
                    if bUppercase:
                        sValue = sValue[0:1].upper() + sValue[1:]
                    self.lToken[i]["sNewValue"] = sValue

    def rewrite (self, bDebug=False):
        """Apply pending token edits to the sentence and purge the token list.

        Tokens flagged "bToRemove" are blanked out of self.sSentence (their span
        is replaced by spaces) and dropped from self.lToken; tokens carrying
        "sNewValue" are rewritten in place, the original text being preserved
        under "sRealValue". Replacements are padded with spaces or truncated so
        the sentence keeps its original length — presumably so the nStart/nEnd
        offsets of the remaining tokens stay valid (TODO confirm against callers).

        @param bDebug: when True, print each removal/rewrite and the sentence state.
        @return: the rewritten sentence (str).
        """
        lNewToken = []
        for dToken in self.lToken:
            if "bToRemove" in dToken:
                # Remove useless token: overwrite its span with spaces so the
                # character offsets of the following tokens are unchanged.
                self.sSentence = self.sSentence[:dToken["nStart"]] + " " * (dToken["nEnd"] - dToken["nStart"]) + self.sSentence[dToken["nEnd"]:]
                if bDebug:
                    print("removed:", dToken["sValue"])
                    print(self.sSentence)
            else:
                lNewToken.append(dToken)
                if "sNewValue" in dToken:
                    # Rewrite token and sentence; keep the original text under "sRealValue".
                    if bDebug:
                        print(dToken["sValue"], "->", dToken["sNewValue"])
                    dToken["sRealValue"] = dToken["sValue"]
                    dToken["sValue"] = dToken["sNewValue"]
                    nDiffLen = len(dToken["sRealValue"]) - len(dToken["sNewValue"])
                    # Pad (nDiffLen >= 0) or truncate (nDiffLen < 0) the new value
                    # so it occupies exactly the span of the original token.
                    sNewRepl = (dToken["sNewValue"] + " " * nDiffLen)  if nDiffLen >= 0  else dToken["sNewValue"][:len(dToken["sRealValue"])]
                    self.sSentence = self.sSentence[:dToken["nStart"]] + sNewRepl + self.sSentence[dToken["nEnd"]:]
                    del dToken["sNewValue"]
        # Keep only the surviving tokens. clear() empties the old list object
        # (in case other code still holds a reference to it) before rebinding.
        self.lToken.clear()
        self.lToken = lNewToken
        return self.sSentence