Overview
| Comment: | [core] small debugging modification |
|---|---|
| Downloads: | Tarball \| ZIP archive \| SQL archive |
| Timelines: | family \| ancestors \| descendants \| both \| core \| rg |
| Files: | files \| file ages \| folders |
| SHA3-256: | d5decbf54f8d310f53474b03335ba830 |
| User & Date: | olr on 2018-06-18 18:28:37 |
| Other Links: | branch diff \| manifest \| tags |
Context
2018-06-18

| Time | Check-in |
|---|---|
| 19:15 | [build] small code clarification (check-in: 4bd95a4517, user: olr, tags: build, rg) |
| 18:28 | [core] small debugging modification (check-in: d5decbf54f, user: olr, tags: core, rg) |
| 18:13 | merge trunk (check-in: 10458c45e6, user: olr, tags: rg) |
Changes
Modified gc_core/py/lang_core/gc_engine.py from [7520fb92f1] to [bea60fba77].
The modified hunk, as it reads after the change (lines 854-881 of the new file; the elided surrounding context is unchanged):
        lNewToken = []
        for i, dToken in enumerate(self.lToken):
            if "bToRemove" in dToken:
                # remove useless token: blank its span with spaces so character offsets stay valid
                self.sSentence = self.sSentence[:dToken["nStart"]] + " " * (dToken["nEnd"] - dToken["nStart"]) + self.sSentence[dToken["nEnd"]:]
                if bDebug:
                    print("removed:", dToken["sValue"])
            else:
                lNewToken.append(dToken)
                if "sNewValue" in dToken:
                    # rewrite token and sentence
                    if bDebug:
                        print(dToken["sValue"], "->", dToken["sNewValue"])
                    dToken["sRealValue"] = dToken["sValue"]
                    dToken["sValue"] = dToken["sNewValue"]
                    nDiffLen = len(dToken["sRealValue"]) - len(dToken["sNewValue"])
                    # pad a shorter replacement with spaces, truncate a longer one, to keep the sentence length unchanged
                    sNewRepl = (dToken["sNewValue"] + " " * nDiffLen) if nDiffLen >= 0 else dToken["sNewValue"][:len(dToken["sRealValue"])]
                    self.sSentence = self.sSentence[:dToken["nStart"]] + sNewRepl + self.sSentence[dToken["nEnd"]:]
                    del dToken["sNewValue"]
        if bDebug:
            print(self.sSentence)
        self.lToken.clear()
        self.lToken = lNewToken
        return self.sSentence

    #### Analyse tokens
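For readers skimming the diff, here is a minimal standalone sketch of the technique the hunk relies on: removed tokens are blanked out with spaces, and rewritten tokens are padded or truncated to the width of the original token, so the nStart/nEnd offsets recorded for every other token remain valid without re-tokenizing. The function name rewrite_sentence, the sample sentence and the token dictionaries below are hypothetical illustrations, not code or data from the Grammalecte repository.

    # Standalone sketch (not part of the check-in) of the offset-preserving rewrite
    # technique used in the hunk above. Names and sample data are made up for
    # illustration; only the blanking and padding logic mirrors the code shown above.

    def rewrite_sentence (sSentence, lToken, bDebug=False):
        "blank tokens flagged with bToRemove and replace tokens carrying sNewValue, without changing the sentence length"
        lNewToken = []
        for dToken in lToken:
            if dToken.get("bToRemove"):
                # overwrite the token span with spaces: later offsets stay valid
                sSentence = sSentence[:dToken["nStart"]] + " " * (dToken["nEnd"] - dToken["nStart"]) + sSentence[dToken["nEnd"]:]
                if bDebug:
                    print("removed:", dToken["sValue"])
            else:
                lNewToken.append(dToken)
                if "sNewValue" in dToken:
                    if bDebug:
                        print(dToken["sValue"], "->", dToken["sNewValue"])
                    nDiffLen = len(dToken["sValue"]) - len(dToken["sNewValue"])
                    # pad a shorter replacement, truncate a longer one, so the slice
                    # written back has exactly the width of the original token
                    sNewRepl = (dToken["sNewValue"] + " " * nDiffLen) if nDiffLen >= 0 else dToken["sNewValue"][:len(dToken["sValue"])]
                    sSentence = sSentence[:dToken["nStart"]] + sNewRepl + sSentence[dToken["nEnd"]:]
        return sSentence, lNewToken


    if __name__ == "__main__":
        sSentence = "the cat  sat"
        lToken = [
            { "nStart": 0, "nEnd": 3,  "sValue": "the" },
            { "nStart": 4, "nEnd": 7,  "sValue": "cat", "sNewValue": "it" },
            { "nStart": 9, "nEnd": 12, "sValue": "sat", "bToRemove": True },
        ]
        sNew, lKept = rewrite_sentence(sSentence, lToken, bDebug=True)
        # the rewritten sentence keeps its original length (12 characters),
        # so the nStart/nEnd offsets of the surviving tokens are still correct
        print(repr(sNew), len(sNew))

The design choice illustrated here is to trade a few padding spaces for stable character offsets: rules that fire later can keep using the positions computed during tokenization instead of recomputing them after every rewrite.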