Overview
Comment: [core] gc core: new action -> merge and rewrite
SHA3-256: a88fc2da75fab029d30ebf404e92f622
User & Date: olr on 2020-09-28 12:51:43
Context
2020-09-30 12:01 | [fr] adjustments | check-in: 66a7f50008 | user: olr | tags: trunk, fr
2020-09-28 12:51 | [core] gc core: new action -> merge and rewrite | check-in: a88fc2da75 | user: olr | tags: trunk, core
2020-09-20 09:06 | [fr] adjustments | check-in: e60b89c599 | user: olr | tags: trunk, fr
Changes
Modified gc_core/js/lang_core/gc_engine.js from [5b16fd7058] to [e5451f03eb].
```diff
@@ -909,14 +909,19 @@
                 }
             }
         }
         else if (sWhat === "␣") {
             // merge tokens
             this.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd;
         }
+        else if (sWhat.startsWith("␣")) {
+            sWhat = this._expand(sWhat, nTokenOffset, nLastToken);
+            this.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd;
+            this.lTokens[nTokenRewriteStart]["sMergedValue"] = sWhat.slice(1);
+        }
         else if (sWhat === "_") {
             // neutralized token
             if (nTokenRewriteEnd - nTokenRewriteStart == 0) {
                 this.lTokens[nTokenRewriteStart]["sNewValue"] = "_";
             } else {
                 for (let i = nTokenRewriteStart; i <= nTokenRewriteEnd; i++) {
                     this.lTokens[i]["sNewValue"] = "_";
```
```diff
@@ -979,14 +984,19 @@
                     oMergingToken["sValue"] += " ".repeat(oToken["nStart"] - oMergingToken["nEnd"]) + oToken["sValue"];
                     oMergingToken["nEnd"] = oToken["nEnd"];
                     if (bDebug) {
                         console.log(" MERGED TOKEN: " + oMergingToken["sValue"]);
                     }
                     oToken["bMerged"] = true;
                     bKeepToken = false;
+                    if (iToken == nMergeUntil && oMergingToken.hasOwnProperty("sMergedValue")) {
+                        oMergingToken["sValue"] = oMergingToken["sMergedValue"];
+                        let sSpaceFiller = " ".repeat(oToken["nEnd"] - oMergingToken["nStart"] - oMergingToken["sMergedValue"].length);
+                        this.sSentence = this.sSentence.slice(0, oMergingToken["nStart"]) + oMergingToken["sMergedValue"] + sSpaceFiller + this.sSentence.slice(oToken["nEnd"]);
+                    }
                 }
                 if (oToken.hasOwnProperty("nMergeUntil")) {
                     if (iToken > nMergeUntil) { // this token is not already merged with a previous token
                         oMergingToken = oToken;
                     }
                     if (oToken["nMergeUntil"] > nMergeUntil) {
                         nMergeUntil = oToken["nMergeUntil"];
```
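Both ports pad the rewritten value with spaces so the modified slice of sSentence keeps exactly the length of the text it replaces, which leaves the nStart/nEnd offsets of every following token valid. A quick check of that padding arithmetic (plain Python with made-up offsets, not code from this check-in):

```python
# Hypothetical merged span: sSentence[10:25] (15 characters) is rewritten as a
# 9-character value; the filler restores the original span length.
nStart = 10                      # merger token's nStart
nEnd = 25                        # nEnd of the last merged token
sMergedValue = "rewritten"       # 9 characters

sSpaceFiller = " " * (nEnd - nStart - len(sMergedValue))   # 25 - 10 - 9 = 6 spaces
assert len(sMergedValue + sSpaceFiller) == nEnd - nStart   # span length unchanged
```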
Modified gc_core/py/lang_core/gc_engine.py from [20a4e92e8f] to [05ee688cb4].
```diff
@@ -792,14 +792,18 @@
                 self.lTokens[nTokenRewriteStart]["bToRemove"] = True
             else:
                 for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
                     self.lTokens[i]["bToRemove"] = True
         elif sWhat == "␣":
             # merge tokens
             self.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd
+        elif sWhat.startswith("␣"):
+            sWhat = self._expand(sWhat, nTokenOffset, nLastToken)
+            self.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd
+            self.lTokens[nTokenRewriteStart]["sMergedValue"] = sWhat[1:]
         elif sWhat == "_":
             # neutralized token
             if nTokenRewriteEnd - nTokenRewriteStart == 0:
                 self.lTokens[nTokenRewriteStart]["sNewValue"] = "_"
             else:
                 for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
                     self.lTokens[i]["sNewValue"] = "_"
```
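This hunk only tags the span: as with the plain "␣" action, the first token records how far the merge extends (nMergeUntil), and it now also stores the replacement text, i.e. everything after the leading "␣" in the expanded action string. A minimal standalone sketch of that bookkeeping, with a hypothetical helper and toy token dicts, and a simple string replace standing in for what self._expand does in the engine:

```python
def tag_merge_and_rewrite(lTokens, nStart, nEnd, sWhat, dBackRefs):
    """Mimic the new '␣...' action: mark tokens nStart..nEnd for merging
    and store the rewritten value on the first token of the span."""
    for sRef, sValue in dBackRefs.items():    # crude stand-in for self._expand()
        sWhat = sWhat.replace(sRef, sValue)
    lTokens[nStart]["nMergeUntil"] = nEnd
    lTokens[nStart]["sMergedValue"] = sWhat[1:]   # drop the leading "␣"

# toy tokens: "peut être" to be merged and rewritten as "peut-être"
lTokens = [
    {"sValue": "peut", "nStart": 0, "nEnd": 4},
    {"sValue": "être", "nStart": 5, "nEnd": 9},
]
tag_merge_and_rewrite(lTokens, 0, 1, "␣peut-être", {})
print(lTokens[0])
# {'sValue': 'peut', 'nStart': 0, 'nEnd': 4, 'nMergeUntil': 1, 'sMergedValue': 'peut-être'}
```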
```diff
@@ -844,14 +848,18 @@
                     # token to merge
                     dTokenMerger["sValue"] += " " * (dToken["nStart"] - dTokenMerger["nEnd"]) + dToken["sValue"]
                     dTokenMerger["nEnd"] = dToken["nEnd"]
                     if bDebug:
                         echo(" MERGED TOKEN: " + dTokenMerger["sValue"])
                     dToken["bMerged"] = True
                     bKeepToken = False
+                    if iToken == nMergeUntil and "sMergedValue" in dTokenMerger:
+                        dTokenMerger["sValue"] = dTokenMerger["sMergedValue"]
+                        sSpaceFiller = " " * (dToken["nEnd"] - dTokenMerger["nStart"] - len(dTokenMerger["sMergedValue"]))
+                        self.sSentence = self.sSentence[:dTokenMerger["nStart"]] + dTokenMerger["sMergedValue"] + sSpaceFiller + self.sSentence[dToken["nEnd"]:]
                 if "nMergeUntil" in dToken:
                     # first token to be merge with
                     if iToken > nMergeUntil:  # this token is not to be merged with a previous token
                         dTokenMerger = dToken
                     if dToken["nMergeUntil"] > nMergeUntil:
                         nMergeUntil = dToken["nMergeUntil"]
                     del dToken["nMergeUntil"]
```
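The rewrite pass then does the actual replacement: when the loop reaches the last token of a merged span (iToken == nMergeUntil) and the merger carries an sMergedValue, the merged token takes that value and the matching slice of sSentence is overwritten, padded with spaces so the sentence keeps its length. A self-contained sketch of that loop, stripped of the engine's other concerns (INFO tokens, deletions, case handling) and run on made-up tokens; the function name and data are illustrative only:

```python
def rewrite(sSentence, lTokens):
    "Replicate the merge-and-rewrite pass on a plain token list."
    lNewTokens = []
    nMergeUntil = 0
    dTokenMerger = None
    for iToken, dToken in enumerate(lTokens):
        bKeepToken = True
        if nMergeUntil and iToken <= nMergeUntil:
            # token absorbed by the current merger
            dTokenMerger["sValue"] += " " * (dToken["nStart"] - dTokenMerger["nEnd"]) + dToken["sValue"]
            dTokenMerger["nEnd"] = dToken["nEnd"]
            bKeepToken = False
            if iToken == nMergeUntil and "sMergedValue" in dTokenMerger:
                # last merged token reached: swap in the rewritten value
                dTokenMerger["sValue"] = dTokenMerger["sMergedValue"]
                sSpaceFiller = " " * (dToken["nEnd"] - dTokenMerger["nStart"] - len(dTokenMerger["sMergedValue"]))
                sSentence = sSentence[:dTokenMerger["nStart"]] + dTokenMerger["sMergedValue"] + sSpaceFiller + sSentence[dToken["nEnd"]:]
        if "nMergeUntil" in dToken:
            if iToken > nMergeUntil:    # this token starts a new merge span
                dTokenMerger = dToken
            nMergeUntil = max(nMergeUntil, dToken["nMergeUntil"])
            del dToken["nMergeUntil"]
        if bKeepToken:
            lNewTokens.append(dToken)
    return sSentence, lNewTokens

sSentence = "Il le fera au fur et à mesure demain."
lTokens = [
    {"sValue": "Il",     "nStart": 0,  "nEnd": 2},
    {"sValue": "le",     "nStart": 3,  "nEnd": 5},
    {"sValue": "fera",   "nStart": 6,  "nEnd": 10},
    # tagged as in the first hunk: merge up to token 7, rewrite the whole span
    {"sValue": "au",     "nStart": 11, "nEnd": 13, "nMergeUntil": 7, "sMergedValue": "progressivement"},
    {"sValue": "fur",    "nStart": 14, "nEnd": 17},
    {"sValue": "et",     "nStart": 18, "nEnd": 20},
    {"sValue": "à",      "nStart": 21, "nEnd": 22},
    {"sValue": "mesure", "nStart": 23, "nEnd": 29},
    {"sValue": "demain", "nStart": 30, "nEnd": 36},
    {"sValue": ".",      "nStart": 36, "nEnd": 37},
]
sNew, lKept = rewrite(sSentence, lTokens)
print(repr(sNew))                     # 'Il le fera progressivement    demain.'
print(len(sNew) == len(sSentence))    # True
print([d["sValue"] for d in lKept])   # ['Il', 'le', 'fera', 'progressivement', 'demain', '.']
```

Because the filler keeps the sentence length constant, the offsets of "demain" and of the final period are still correct after the rewrite, which is what the space filler is for.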