223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
|
if (bFullInfo) {
this.lTokens0 = Array.from(this.lTokens);
// the list of tokens is duplicated, to keep tokens from being deleted during analysis
}
this.parseText(this.sSentence, this.sSentence0, false, iStart, sCountry, dOpt, bShowRuleId, bDebug, bContext);
if (bFullInfo) {
for (let oToken of this.lTokens0) {
if (oToken["sType"] == "WORD") {
oToken["bValidToken"] = gc_engine.oSpellChecker.isValidToken(oToken["sValue"]);
}
if (!oToken.hasOwnProperty("lMorph")) {
oToken["lMorph"] = gc_engine.oSpellChecker.getMorph(oToken["sValue"]);
}
gc_engine.oSpellChecker.setLabelsOnToken(oToken);
}
lSentences.push({
"nStart": iStart,
"nEnd": iEnd,
"sSentence": this.sSentence0,
"lTokens": this.lTokens0,
|
<
<
<
<
<
<
|
223
224
225
226
227
228
229
230
231
232
233
234
235
236
|
if (bFullInfo) {
this.lTokens0 = Array.from(this.lTokens);
// the list of tokens is duplicated, to keep tokens from being deleted during analysis
}
this.parseText(this.sSentence, this.sSentence0, false, iStart, sCountry, dOpt, bShowRuleId, bDebug, bContext);
if (bFullInfo) {
for (let oToken of this.lTokens0) {
gc_engine.oSpellChecker.setLabelsOnToken(oToken);
}
lSentences.push({
"nStart": iStart,
"nEnd": iEnd,
"sSentence": this.sSentence0,
"lTokens": this.lTokens0,
|