Overview
| Comment: | [core][js] gc engine: fix several issues |
|---|---|
| Downloads: | Tarball, ZIP archive, SQL archive |
| Timelines: | family, ancestors, descendants, both, core, rg |
| Files: | files, file ages, folders |
| SHA3-256: | b4074ae3b3cf54a0cda6aea96fcc87f2 |
| User & Date: | olr on 2018-09-12 07:58:11 |
| Other Links: | branch diff, manifest, tags |
Context
2018-09-12

| 08:05 | [build] update code conversion to JS | check-in: 0c1e2728dd | user: olr | tags: build, rg |
| 07:58 | [core][js] gc engine: fix several issues | check-in: b4074ae3b3 | user: olr | tags: core, rg |

2018-09-11

| 21:20 | [core][js] gc engine: fix several issues | check-in: a75b859e72 | user: olr | tags: core, rg |
Changes
Modified gc_core/js/lang_core/gc_engine.js from [ab940d7e96] to [f31744b125].
︙ (unchanged lines omitted)

Shown: old lines 615-621, new lines 615-629

```js
// text processor
let nTokenStart = (eAct[0] > 0) ? nTokenOffset + eAct[0] : nLastToken + eAct[0];
let nTokenEnd = (eAct[1] > 0) ? nTokenOffset + eAct[1] : nLastToken + eAct[1];
this._tagAndPrepareTokenForRewriting(sWhat, nTokenStart, nTokenEnd, nTokenOffset, nLastToken, eAct[2], bDebug);
bChange = true;
if (bDebug) {
    console.log(`    TEXT_PROCESSOR: ${sRuleId} ${sLineId}`);
}
```
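To make the offset arithmetic above concrete, here is a small, self-contained sketch: positive offsets in eAct are resolved relative to nTokenOffset, while zero or negative offsets are resolved relative to nLastToken. The numeric values below are invented for illustration; only the two ternary expressions come from the diff.

```js
// Hedged illustration of the offset resolution used in the text-processor action.
let nTokenOffset = 10;                 // assumed: token position just before the matched sequence
let nLastToken = 16;                   // assumed: position of the last matched token
let eAct = [1, -1, true];              // hypothetical action: [start offset, end offset, case sensitivity]
let nTokenStart = (eAct[0] > 0) ? nTokenOffset + eAct[0] : nLastToken + eAct[0];
let nTokenEnd = (eAct[1] > 0) ? nTokenOffset + eAct[1] : nLastToken + eAct[1];
console.log(nTokenStart, nTokenEnd);   // 11 15
```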
︙ (unchanged lines omitted)

Shown: old lines 803-809, new lines 803-816

```js
    }
    //console.log(sText+"\nstart: "+m.start[iGroup]+" end:"+m.end[iGroup]);
    return sText.slice(0, m.start[iGroup]) + sNew + sText.slice(m.end[iGroup]);
}

_tagAndPrepareTokenForRewriting (sWhat, nTokenRewriteStart, nTokenRewriteEnd, nTokenOffset, nLastToken, bCaseSvty, bDebug) {
    // text processor: rewrite tokens between <nTokenRewriteStart> and <nTokenRewriteEnd> position
```
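The return statement above rebuilds the sentence by splicing the replacement into the span of the matched group. A minimal sketch with made-up values (the match object is hand-built here to mimic the m.start/m.end arrays used above; in the engine it comes from the rule's regex):

```js
// Hedged example: replace only group 1 of a match inside the sentence.
let sText = "Une chatt noire";
let m = { start: [0, 4], end: [15, 9] };   // group 1 covers "chatt" (indices 4..9)
let iGroup = 1;
let sNew = "chatte";
let sResult = sText.slice(0, m.start[iGroup]) + sNew + sText.slice(m.end[iGroup]);
console.log(sResult);   // "Une chatte noire"
```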
︙ (unchanged lines omitted)

Shown: old lines 876-882, new lines 873-887

```js
// rewrite the sentence, modify tokens, purge the token list
if (bDebug) {
    console.log("REWRITE");
}
let lNewToken = [];
let nMergeUntil = 0;
let dTokenMerger = null;
```
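The three variables declared above drive a merge pass over the token list: tokens flagged for merging are folded into a single "merger" token until a stop position is reached. The loop below is only a sketch of that pattern, not the project's exact code; the token fields (i, sValue, nMergeUntil) are taken from the names visible in the diff and the control flow is an assumption.

```js
// Hedged sketch of a "merge until" pass over tokens.
function mergeTokens (lToken) {
    let lNewToken = [];
    let nMergeUntil = 0;
    let dTokenMerger = null;
    for (let dToken of lToken) {
        if (nMergeUntil && dTokenMerger !== null && dToken["i"] <= nMergeUntil) {
            // this token is absorbed by the merger token opened earlier
            dTokenMerger["sValue"] += dToken["sValue"];
            continue;
        }
        if (dToken.hasOwnProperty("nMergeUntil") && dToken["nMergeUntil"] > nMergeUntil) {
            // this token opens a merge span
            nMergeUntil = dToken["nMergeUntil"];
            dTokenMerger = dToken;
        }
        lNewToken.push(dToken);
    }
    return lNewToken;
}

// Example: tokens 2 and 3 are merged into token 1.
let lToken = [
    { "i": 1, "sValue": "peut", "nMergeUntil": 3 },
    { "i": 2, "sValue": "-" },
    { "i": 3, "sValue": "être" }
];
console.log(mergeTokens(lToken).map(t => t["sValue"]));   // [ "peut-être" ]
```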
︙ (unchanged lines omitted)

Shown: old lines 1156-1169, new lines 1153-1167

```js
    }
    // search sPattern
    return lMorph.some(sMorph => (sMorph.search(sPattern) !== -1));
}

function g_analyse (dToken, sPattern, sNegPattern="", nLeft=null, nRight=null, bMemorizeMorph=true) {
    // analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies
    let lMorph;
    if (nLeft !== null) {
        lMorph = _oSpellChecker.getMorph(dToken["sValue"].slice(nLeft, nRight));
        if (bMemorizeMorph) {
            dToken["lMorph"] = lMorph;
        }
    } else {
        lMorph = _oSpellChecker.getMorph(dToken["sValue"]);
```
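A hedged usage sketch of g_analyse as declared above: when nLeft/nRight are set, only a slice of the token's surface form is sent to the spellchecker for morphological analysis, and the resulting morphologies can be cached on the token. The token value, the pattern and the dictionary output are assumptions for illustration.

```js
// Hypothetical call: test whether the first part of a hyphenated token is a verb form.
let dToken = { "sValue": "donne-moi" };
// analyse only "donne" (characters 0..5) and memorize its morphologies on the token
let bIsVerb = g_analyse(dToken, ":V", "", 0, 5, true);
console.log(bIsVerb, dToken["lMorph"]);   // e.g. true and a list of verb morphologies, depending on the dictionary
```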
︙ (unchanged lines omitted)

Shown: old lines 1217-1223, new lines 1215-1239

```js
    return bResult;
}

function g_tag_before (dToken, dTags, sTag) {
    // return true only if <sTag> was set on a token located before <dToken>
    if (!dTags.has(sTag)) {
        return false;
    }
```

︙ (unchanged lines omitted)
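For context, a hedged sketch of how g_tag_before could be exercised, assuming (this part is not visible in the hunk) that the remainder of the function compares the token's position with the position recorded in dTags for the tag. The data shapes and tag names below are invented for the example.

```js
// Hypothetical data: dTags maps a tag name to the positions where it was set,
// dToken carries its own position in the sentence.
let dTags = new Map([["_proposal_", [2, 2]]]);
let dTokenLater = { "i": 5, "sValue": "manger" };
console.log(g_tag_before(dTokenLater, dTags, "_proposal_"));   // expected true: tag set at token 2, before token 5
console.log(g_tag_before(dTokenLater, dTags, "_negation_"));   // false: tag never set
```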