Index: gc_core/py/lang_core/gc_engine.py
==================================================================
--- gc_core/py/lang_core/gc_engine.py
+++ gc_core/py/lang_core/gc_engine.py
@@ -168,13 +168,11 @@
                     if bDebug:
                         print("\n>>>> GRAPH:", sGraphName, sLineId)
                     bParagraphChange, errs = oSentence.parse(dAllGraph[sGraphName], dPriority, sCountry, dOptions, bShowRuleId, bDebug, bContext)
                     dErrs.update(errs)
                     if bParagraphChange:
-                        s = oSentence.rewrite()
-                        if bDebug:
-                            print("~", oSentence.sSentence)
+                        s = oSentence.rewrite(bDebug)
         elif not sOption or dOptions.get(sOption, False):
             # regex rules
             for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup:
                 if sRuleId not in _aIgnoredRules:
                     for m in zRegex.finditer(s):
@@ -849,28 +847,31 @@
         for i, sValue in zip(range(nTokenRewriteStart, nTokenRewriteEnd+1), lTokenValue):
             if bUppercase:
                 sValue = sValue[0:1].upper() + sValue[1:]
             self.lToken[i]["sNewValue"] = sValue

-    def rewrite (self):
+    def rewrite (self, bDebug=False):
         "rewrite the sentence, modify tokens, purge the token list"
         lNewToken = []
         for i, dToken in enumerate(self.lToken):
             if "bToRemove" in dToken:
                 # remove useless token
-                self.sSentence = self.sSentence[:self.nOffset+dToken["nStart"]] + " " * (dToken["nEnd"] - dToken["nStart"]) + self.sSentence[self.nOffset+dToken["nEnd"]:]
-                #print("removed:", dToken["sValue"])
+                self.sSentence = self.sSentence[:dToken["nStart"]] + " " * (dToken["nEnd"] - dToken["nStart"]) + self.sSentence[dToken["nEnd"]:]
+                if bDebug:
+                    print("removed:", dToken["sValue"])
+                    print(self.sSentence)
             else:
                 lNewToken.append(dToken)
                 if "sNewValue" in dToken:
                     # rewrite token and sentence
-                    #print(dToken["sValue"], "->", dToken["sNewValue"])
+                    if bDebug:
+                        print(dToken["sValue"], "->", dToken["sNewValue"])
                     dToken["sRealValue"] = dToken["sValue"]
                     dToken["sValue"] = dToken["sNewValue"]
                     nDiffLen = len(dToken["sRealValue"]) - len(dToken["sNewValue"])
                     sNewRepl = (dToken["sNewValue"] + " " * nDiffLen)  if nDiffLen >= 0  else dToken["sNewValue"][:len(dToken["sRealValue"])]
-                    self.sSentence = self.sSentence[:self.nOffset+dToken["nStart"]] + sNewRepl + self.sSentence[self.nOffset+dToken["nEnd"]:]
+                    self.sSentence = self.sSentence[:dToken["nStart"]] + sNewRepl + self.sSentence[dToken["nEnd"]:]
                     del dToken["sNewValue"]
         self.lToken.clear()
         self.lToken = lNewToken
         return self.sSentence
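
Below is a minimal standalone sketch, not part of the patch, of the in-place rewrite trick that the patched rewrite(bDebug) method relies on: the replacement string is padded with spaces when the new token value is shorter, or truncated when it is longer, so the nStart/nEnd character offsets of the other tokens stay valid after the sentence text is modified. The helper name rewrite_token and the sample sentence are made up for illustration; only the padding/truncation logic mirrors the patched code.

# Sketch of the offset-preserving token rewrite used in rewrite() above.
# rewrite_token and the sample data are hypothetical, for illustration only.

def rewrite_token (sSentence, dToken, sNewValue, bDebug=False):
    "replace dToken's span in sSentence without shifting later offsets"
    if bDebug:
        print(dToken["sValue"], "->", sNewValue)
    nDiffLen = len(dToken["sValue"]) - len(sNewValue)
    # pad with spaces if the new value is shorter, truncate if it is longer
    sNewRepl = (sNewValue + " " * nDiffLen)  if nDiffLen >= 0  else sNewValue[:len(dToken["sValue"])]
    return sSentence[:dToken["nStart"]] + sNewRepl + sSentence[dToken["nEnd"]:]

if __name__ == "__main__":
    sSentence = "Do not rewrite this"
    dToken = { "sValue": "rewrite", "nStart": 7, "nEnd": 14 }
    print(repr(rewrite_token(sSentence, dToken, "move", bDebug=True)))
    # -> 'Do not move    this'  (same length as before, so the offsets of "this" are unchanged)

The same padding keeps error positions reported against the original paragraph consistent, which is why the patched code can drop the self.nOffset adjustment and slice the sentence with token-relative offsets directly.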