Overview
| Comment: | [core][js] try to remove some warnings |
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | core | webext2 |
| Files: | files | file ages | folders |
| SHA3-256: | 25be7eb67bed0e81ba5234e54a62df91 |
| User & Date: | olr on 2017-08-04 05:17:06 |
| Other Links: | branch diff | manifest | tags |
Context
2017-08-04
| 05:18 | [fx] unknown key in manifest | check-in: 4151e87ea3 | user: olr | tags: fx, webext2 |
| 05:17 | [core][js] try to remove some warnings | check-in: 25be7eb67b | user: olr | tags: core, webext2 |

2017-08-03
| 11:41 | [core][js] tests initialization | check-in: af95395956 | user: olr | tags: core, webext2 |
Changes
Modified gc_core/js/lang_core/gc_engine.js from [4d4d4ec095] to [ff4d262785].
Old (lines 1–9):

    // Grammar checker engine
    ${string}
    ${regex}
    ${map}
    if (typeof(require) !== 'undefined') {
        var helpers = require("resource://grammalecte/helpers.js");

New (lines 1–11):

    // Grammar checker engine
    //"use strict";
    ${string}
    ${regex}
    ${map}
    if (typeof(require) !== 'undefined') {
        var helpers = require("resource://grammalecte/helpers.js");
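The line added here is a strict-mode directive, left commented out. As a hedged aside (not part of the check-in itself): if the directive were active, the engine would run under ECMAScript strict mode, where patterns that only draw console warnings in sloppy mode, such as assigning to a variable that was never declared, become hard errors.

    "use strict";

    function demo() {
        sMessage = "hello";          // throws ReferenceError: sMessage was never declared
    }

    try {
        demo();
    } catch (e) {
        console.log(e.name);         // "ReferenceError"
    }

In non-strict code the same assignment silently creates a global, which some engines report as a warning when extra warnings are enabled.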
Old (lines 82–88):

    // parse sentence
    for (let [iStart, iEnd] of this._getSentenceBoundaries(sText)) {
        if (4 < (iEnd - iStart) < 2000) {
            dDA.clear();
            //echo(sText.slice(iStart, iEnd));
            try {

New (lines 84–98):

    // parse sentence
    for (let [iStart, iEnd] of this._getSentenceBoundaries(sText)) {
        if (4 < (iEnd - iStart) < 2000) {
            dDA.clear();
            //echo(sText.slice(iStart, iEnd));
            try {
                [sNew, errs] = this._proofread(sText.slice(iStart, iEnd), sAlt.slice(iStart, iEnd), iStart, false, dDA, dPriority, sCountry, bDebug, bContext);
                dErrors.gl_update(errs);
            }
            catch (e) {
                helpers.logerror(e);
            }
        }
    }
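One observation about the (unchanged) length guard in this hunk: JavaScript has no chained comparisons, so 4 < (iEnd - iStart) < 2000 evaluates the left comparison first and then compares its boolean result, coerced to 0 or 1, against 2000, which is always true. A small sketch of the difference, not part of this check-in:

    let n = 50000;                      // sentence length clearly outside the intended range
    console.log(4 < n < 2000);          // true: (4 < n) -> true -> 1, and 1 < 2000 holds
    console.log(4 < n && n < 2000);     // false: explicit range check with two comparisons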
Old (lines 215–228):

        if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
            oErr["aSuggestions"] = capitalizeArray(sRepl.gl_expand(m).split("|"));
        } else {
            oErr["aSuggestions"] = sRepl.gl_expand(m).split("|");
        }
    }
    // Message
    if (sMsg[0] === "=") {
        sMessage = oEvalFunc[sMsg.slice(1)](s, m)
    } else {
        sMessage = sMsg.gl_expand(m);
    }
    if (bIdRule) {
        sMessage += " ##" + sLineId + " #" + sRuleId;

New (lines 217–231):

        if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
            oErr["aSuggestions"] = capitalizeArray(sRepl.gl_expand(m).split("|"));
        } else {
            oErr["aSuggestions"] = sRepl.gl_expand(m).split("|");
        }
    }
    // Message
    let sMessage = "";
    if (sMsg[0] === "=") {
        sMessage = oEvalFunc[sMsg.slice(1)](s, m)
    } else {
        sMessage = sMsg.gl_expand(m);
    }
    if (bIdRule) {
        sMessage += " ##" + sLineId + " #" + sRuleId;
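The only addition in this hunk is the let sMessage = ""; declaration ahead of the branch that fills it; on the old side, sMessage is assigned without any declaration visible in the hunk, which is plausibly one of the warnings the check-in comment refers to. A minimal sketch of the pattern, with the rule machinery replaced by placeholders:

    let sMessage = "";                         // declare first, so later assignments have a binding
    const sMsg = "Accord incorrect.";          // hypothetical message template
    if (sMsg[0] === "=") {
        sMessage = "(computed message)";       // stands in for oEvalFunc[sMsg.slice(1)](s, m)
    } else {
        sMessage = sMsg;                       // stands in for sMsg.gl_expand(m)
    }
    console.log(sMessage);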
Old (lines 276–282):

    reactivateRule: function (sRuleId) {
        _aIgnoredRules.delete(sRuleId);
    },
    listRules: function (sFilter=null) {
        // generator: returns tuple (sOption, sLineId, sRuleId)
        try {

New (lines 279–301):

    reactivateRule: function (sRuleId) {
        _aIgnoredRules.delete(sRuleId);
    },
    listRules: function (sFilter=null) {
        // generator: returns tuple (sOption, sLineId, sRuleId)
        try {
            for (let [sOption, lRuleGroup] of this._getRules(true)) {
                for (let [x1, x2, sLineId, sRuleId, x3, x4] of lRuleGroup) {
                    if (!sFilter || sRuleId.test(sFilter)) {
                        yield [sOption, sLineId, sRuleId];
                    }
                }
            }
            for (let [sOption, lRuleGroup] of this._getRules(false)) {
                for (let [x1, x2, sLineId, sRuleId, x3, x4] of lRuleGroup) {
                    if (!sFilter || sRuleId.test(sFilter)) {
                        yield [sOption, sLineId, sRuleId];
                    }
                }
            }
        }
        catch (e) {
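In the new listRules body, the positions of each rule tuple that the generator does not use are bound to throwaway names (x1, x2, x3, x4). As a hedged aside, JavaScript destructuring can also simply elide unused positions, which keeps linters from reporting unused bindings; the rule tuple below is hypothetical, only the sLineId/sRuleId positions mirror the code above:

    // Hypothetical rule group: [pattern, groups, sLineId, sRuleId, flag, actions]
    const lRuleGroup = [
        [/\bfoo\b/, null, "#1234", "rule_foo", false, []]
    ];

    for (let [, , sLineId, sRuleId] of lRuleGroup) {   // elided slots instead of x1, x2, ...
        console.log(sLineId, sRuleId);
    }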
Modified gc_lang/fr/modules-js/gce_suggestions.js from [7f7dced66d] to [5dc3101886].
Old (lines 15–21):

    for (let sStem of stem(sFlex)) {
        let tTags = conj._getTags(sStem);
        if (tTags) {
            // we get the tense
            let aTense = new Set();
            for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
                let m;

New (lines 15–30):

    for (let sStem of stem(sFlex)) {
        let tTags = conj._getTags(sStem);
        if (tTags) {
            // we get the tense
            let aTense = new Set();
            for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
                let m;
                let zVerb = new RegExp (">"+sStem+" .*?(:(?:Y|I[pqsf]|S[pq]|K))", "g");
                while ((m = zVerb.exec(sMorph)) !== null) {
                    // stem must be used in regex to prevent confusion between different verbs (e.g. sauras has 2 stems: savoir and saurer)
                    if (m) {
                        if (m[1] === ":Y") {
                            aTense.add(":Ip");
                            aTense.add(":Iq");
                            aTense.add(":Is");
                        } else if (m[1] === ":P") {
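In the new version, the verb regex is built from the stem and each morphology string is scanned with exec. A hedged sketch of that iteration pattern on a made-up morphology entry (the tag string below is illustrative, not taken from the real dictionary data):

    const sStem = "savoir";
    const sMorph = ">savoir :V_:Ip:2s >savoir :V_:Y";   // made-up morphology string
    const zVerb = new RegExp(">" + sStem + " .*?(:(?:Y|I[pqsf]|S[pq]|K))", "g");

    let m;
    while ((m = zVerb.exec(sMorph)) !== null) {
        // With the "g" flag, exec() resumes from zVerb.lastIndex on each call,
        // so the loop visits every match instead of only the first one.
        console.log(m[1]);                               // ":Ip", then ":Y"
    }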
Modified gc_lang/fr/modules/gce_suggestions.py from [948c38b1f8] to [e839f7efe5].
Old (lines 11–17):

    aSugg = set()
    for sStem in stem(sFlex):
        tTags = conj._getTags(sStem)
        if tTags:
            # we get the tense
            aTense = set()
            for sMorph in _dAnalyses.get(sFlex, []):  # we don’t check if word exists in _dAnalyses, for it is assumed it has been done before

New (lines 11–25):

    aSugg = set()
    for sStem in stem(sFlex):
        tTags = conj._getTags(sStem)
        if tTags:
            # we get the tense
            aTense = set()
            for sMorph in _dAnalyses.get(sFlex, []):  # we don’t check if word exists in _dAnalyses, for it is assumed it has been done before
                for m in re.finditer(">"+sStem+" .*?(:(?:Y|I[pqsf]|S[pq]|K|P))", sMorph):
                    # stem must be used in regex to prevent confusion between different verbs (e.g. sauras has 2 stems: savoir and saurer)
                    if m:
                        if m.group(1) == ":Y":
                            aTense.add(":Ip")
                            aTense.add(":Iq")
                            aTense.add(":Is")
                        elif m.group(1) == ":P":