Overview
| Comment: | [core] gc engine: use expand for text processor too |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | core | rg |
| Files: | files | file ages | folders |
| SHA3-256: | 816624027a90f6faa46014584838226b |
| User & Date: | olr on 2018-06-27 10:02:37 |
| Other Links: | branch diff | manifest | tags |
Context
|
2018-06-27
| ||
| 10:03 | [fr] conversion: regex rules -> graph rules check-in: e751b105e6 user: olr tags: fr, rg | |
| 10:02 | [core] gc engine: use expand for text processor too check-in: 816624027a user: olr tags: core, rg | |
| 07:43 | [core][build] nTokenOffset necessary for text processor functions check-in: 3a5a4d302e user: olr tags: core, build, rg | |
Changes
Modified gc_core/py/lang_core/gc_engine.py from [f1fdc10ea4] to [5bb9631cfb].
| ︙ | ︙ | |||
881 882 883 884 885 886 887 |
# Context
if bContext:
dErr['sUnderlined'] = self.sSentence0[dErr["nStart"]:dErr["nEnd"]]
dErr['sBefore'] = self.sSentence0[max(0,dErr["nStart"]-80):dErr["nStart"]]
dErr['sAfter'] = self.sSentence0[dErr["nEnd"]:dErr["nEnd"]+80]
return dErr
| | | | | | | | 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 |
# Context
if bContext:
dErr['sUnderlined'] = self.sSentence0[dErr["nStart"]:dErr["nEnd"]]
dErr['sBefore'] = self.sSentence0[max(0,dErr["nStart"]-80):dErr["nStart"]]
dErr['sAfter'] = self.sSentence0[dErr["nEnd"]:dErr["nEnd"]+80]
return dErr
def _expand (self, sText, nTokenOffset):
#print("*", sText)
for m in re.finditer(r"\\([0-9]+)", sText):
sText = sText.replace(m.group(0), self.lToken[int(m.group(1))+nTokenOffset]["sValue"])
#print(">", sText)
return sText
def _tagAndPrepareTokenForRewriting (self, sWhat, nTokenRewriteStart, nTokenRewriteEnd, nTokenOffset, bUppercase=True, bDebug=False):
"text processor: rewrite tokens between <nTokenRewriteStart> and <nTokenRewriteEnd> position"
if bDebug:
print("REWRITING:", nTokenRewriteStart, nTokenRewriteEnd)
if sWhat == "*":
# purge text
|
| ︙ | ︙ | |||
919 920 921 922 923 924 925 926 927 928 929 930 931 932 |
self.lToken[nTokenRewriteStart]["sNewValue"] = "_"
else:
for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
self.lToken[i]["sNewValue"] = "_"
else:
if sWhat.startswith("="):
sWhat = globals()[sWhat[1:]](self.lToken, nTokenOffset)
bUppercase = bUppercase and self.lToken[nTokenRewriteStart]["sValue"][0:1].isupper()
if nTokenRewriteEnd - nTokenRewriteStart == 0:
# one token
sWhat = sWhat + " " * (len(self.lToken[nTokenRewriteStart]["sValue"])-len(sWhat))
if bUppercase:
sWhat = sWhat[0:1].upper() + sWhat[1:]
self.lToken[nTokenRewriteStart]["sNewValue"] = sWhat
| > > | 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 |
self.lToken[nTokenRewriteStart]["sNewValue"] = "_"
else:
for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
self.lToken[i]["sNewValue"] = "_"
else:
if sWhat.startswith("="):
sWhat = globals()[sWhat[1:]](self.lToken, nTokenOffset)
else:
sWhat = self._expand(sWhat, nTokenOffset)
bUppercase = bUppercase and self.lToken[nTokenRewriteStart]["sValue"][0:1].isupper()
if nTokenRewriteEnd - nTokenRewriteStart == 0:
# one token
sWhat = sWhat + " " * (len(self.lToken[nTokenRewriteStart]["sValue"])-len(sWhat))
if bUppercase:
sWhat = sWhat[0:1].upper() + sWhat[1:]
self.lToken[nTokenRewriteStart]["sNewValue"] = sWhat
|
| ︙ | ︙ |