Grammalecte: Check-in [607d532bca]

Overview
Comment: [fx][graphspell] merge multid: multi-dictionaries > editable personal dictionary
SHA3-256: 607d532bca197ef58087e59512fc79a882b44c8f4881ffe7649165e79684cfd7
User & Date: olr on 2018-02-16 15:46:19
Context
2018-02-17 11:50  [build][graphspell][core] build with options to integrate extended and personal dictionaries  (check-in: e0d79cdbf4, user: olr, tags: trunk, core, build, graphspell)
2018-02-16 15:46  [fx][graphspell] merge multid: multi-dictionaries > editable personal dictionary  (check-in: 607d532bca, user: olr, tags: trunk, new_feature, fx, graphspell)
2018-02-16 15:40  [grphspell][js] ibdawg: use ... instead of concat  (check-in: 254e33fe66, user: olr, tags: graphspell, multid)
2018-02-13 13:38  [graphspell][js] rename vars  (check-in: 62c389aedf, user: olr, tags: trunk, graphspell)
Changes

Modified compile_rules.py from [7fa2e820bf] to [9bd1433006].

Before (lines 52-66):
    s = re.sub(r"textarea0\(\s*", 'look(sx, ', s)                                           # textarea0(s)
    s = re.sub(r"before0_chk1\(\s*", 'look_chk1(dDA, sx[:m.start()], 0, ', s)               # before0_chk1(s)
    s = re.sub(r"after0_chk1\(\s*", 'look_chk1(dDA, sx[m.end():], m.end(), ', s)            # after0_chk1(s)
    s = re.sub(r"textarea0_chk1\(\s*", 'look_chk1(dDA, sx, 0, ', s)                         # textarea0_chk1(s)
    s = re.sub(r"isEndOfNG\(\s*\)", 'isEndOfNG(dDA, s[m.end():], m.end())', s)              # isEndOfNG(s)
    s = re.sub(r"isNextNotCOD\(\s*\)", 'isNextNotCOD(dDA, s[m.end():], m.end())', s)        # isNextNotCOD(s)
    s = re.sub(r"isNextVerb\(\s*\)", 'isNextVerb(dDA, s[m.end():], m.end())', s)            # isNextVerb(s)
    s = re.sub(r"\bspell *[(]", '_oDict.isValid(', s)
    s = re.sub(r"[\\](\d+)", 'm.group(\\1)', s)
    return s


def uppercase (s, sLang):
    "(flag i is not enough): converts regex to uppercase regex: 'foo' becomes '[Ff][Oo][Oo]', but 'Bar' becomes 'B[Aa][Rr]'."
    sUp = ""

After (lines 52-66):
    s = re.sub(r"textarea0\(\s*", 'look(sx, ', s)                                           # textarea0(s)
    s = re.sub(r"before0_chk1\(\s*", 'look_chk1(dDA, sx[:m.start()], 0, ', s)               # before0_chk1(s)
    s = re.sub(r"after0_chk1\(\s*", 'look_chk1(dDA, sx[m.end():], m.end(), ', s)            # after0_chk1(s)
    s = re.sub(r"textarea0_chk1\(\s*", 'look_chk1(dDA, sx, 0, ', s)                         # textarea0_chk1(s)
    s = re.sub(r"isEndOfNG\(\s*\)", 'isEndOfNG(dDA, s[m.end():], m.end())', s)              # isEndOfNG(s)
    s = re.sub(r"isNextNotCOD\(\s*\)", 'isNextNotCOD(dDA, s[m.end():], m.end())', s)        # isNextNotCOD(s)
    s = re.sub(r"isNextVerb\(\s*\)", 'isNextVerb(dDA, s[m.end():], m.end())', s)            # isNextVerb(s)
    s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
    s = re.sub(r"[\\](\d+)", 'm.group(\\1)', s)
    return s


def uppercase (s, sLang):
    "(flag i is not enough): converts regex to uppercase regex: 'foo' becomes '[Ff][Oo][Oo]', but 'Bar' becomes 'B[Aa][Rr]'."
    sUp = ""
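
The substitutions above rewrite the shorthand allowed in rule conditions into real engine calls; the only functional change in this hunk is that spell(…) now compiles to _oSpellChecker.isValid(…) instead of _oDict.isValid(…). A minimal JavaScript sketch of the same rewriting idea, built from the two patterns visible in the diff; the function name and the sample condition are illustrative, not part of the check-in:

function rewriteCondition (sCondition) {
    let s = sCondition;
    // "spell(" becomes a call to the shared spell checker, as in the new Python code above
    s = s.replace(/\bspell *\(/g, "_oSpellChecker.isValid(");
    // "\1", "\2", … become explicit group references (Python form shown in the diff)
    s = s.replace(/\\(\d+)/g, "m.group($1)");
    return s;
}

// rewriteCondition("spell(\\2)")  ->  "_oSpellChecker.isValid(m.group(2))"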

Modified gc_core/js/lang_core/gc_engine.js from [2f5e964b5d] to [454f9a423d].

Before (lines 34-48):
}


// data
let _sAppContext = "";                                  // what software is running
let _dOptions = null;
let _aIgnoredRules = new Set();
let _oDict = null;
let _dAnalyses = new Map();                             // cache for data from dictionary


var gc_engine = {

    //// Informations


After (lines 34-48):
}


// data
let _sAppContext = "";                                  // what software is running
let _dOptions = null;
let _aIgnoredRules = new Set();
let _oSpellChecker = null;
let _dAnalyses = new Map();                             // cache for data from dictionary


var gc_engine = {

    //// Informations


Before (lines 318-346):
    },

    //// Initialization

    load: function (sContext="JavaScript", sPath="") {
        try {
            if (typeof(require) !== 'undefined') {
                var ibdawg = require("resource://grammalecte/graphspell/ibdawg.js");
                _oDict = new ibdawg.IBDAWG("${dic_filename}.json");
            } else {
                _oDict = new IBDAWG("${dic_filename}.json", sPath);
            }
            _sAppContext = sContext;
            _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
        }
        catch (e) {
            helpers.logerror(e);
        }
    },

    getDictionary: function () {
        return _oDict;
    },

    //// Options

    setOption: function (sOpt, bVal) {
        if (_dOptions.has(sOpt)) {
            _dOptions.set(sOpt, bVal);

After (lines 318-346):
    },

    //// Initialization

    load: function (sContext="JavaScript", sPath="") {
        try {
            if (typeof(require) !== 'undefined') {
                var spellchecker = require("resource://grammalecte/graphspell/spellchecker.js");
                _oSpellChecker = new spellchecker.SpellChecker("${lang}", "", "${dic_filename}.json");
            } else {
                _oSpellChecker = new SpellChecker("${lang}", sPath, "${dic_filename}.json");
            }
            _sAppContext = sContext;
            _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
        }
        catch (e) {
            helpers.logerror(e);
        }
    },

    getSpellChecker: function () {
        return _oSpellChecker;
    },

    //// Options

    setOption: function (sOpt, bVal) {
        if (_dOptions.has(sOpt)) {
            _dOptions.set(sOpt, bVal);

Before (lines 386-402):
        helpers.echo("DA: " + dDA.get(aWord[0]));
    }
    helpers.echo("FSA: " + _dAnalyses.get(aWord[1]));
    return true;
}

function _storeMorphFromFSA (sWord) {
    // retrieves morphologies list from _oDict -> _dAnalyses
    //helpers.echo("register: "+sWord + " " + _oDict.getMorph(sWord).toString())
    _dAnalyses.set(sWord, _oDict.getMorph(sWord));
    return !!_dAnalyses.get(sWord);
}

function morph (dDA, aWord, sPattern, bStrict=true, bNoWord=false) {
    // analyse a tuple (position, word), return true if sPattern in morphologies (disambiguation on)
    if (!aWord) {
        //helpers.echo("morph: noword, returns " + bNoWord);

After (lines 386-402):
        helpers.echo("DA: " + dDA.get(aWord[0]));
    }
    helpers.echo("FSA: " + _dAnalyses.get(aWord[1]));
    return true;
}

function _storeMorphFromFSA (sWord) {
    // retrieves morphologies list from _oSpellChecker -> _dAnalyses
    //helpers.echo("register: "+sWord + " " + _oSpellChecker.getMorph(sWord).toString())
    _dAnalyses.set(sWord, _oSpellChecker.getMorph(sWord));
    return !!_dAnalyses.get(sWord);
}

function morph (dDA, aWord, sPattern, bStrict=true, bNoWord=false) {
    // analyse a tuple (position, word), return true if sPattern in morphologies (disambiguation on)
    if (!aWord) {
        //helpers.echo("morph: noword, returns " + bNoWord);

Before (lines 640-653):
    exports._rewrite = gc_engine._rewrite;
    exports.ignoreRule = gc_engine.ignoreRule;
    exports.resetIgnoreRules = gc_engine.resetIgnoreRules;
    exports.reactivateRule = gc_engine.reactivateRule;
    exports.listRules = gc_engine.listRules;
    exports._getRules = gc_engine._getRules;
    exports.load = gc_engine.load;
    exports.getDictionary = gc_engine.getDictionary;
    exports.setOption = gc_engine.setOption;
    exports.setOptions = gc_engine.setOptions;
    exports.getOptions = gc_engine.getOptions;
    exports.getDefaultOptions = gc_engine.getDefaultOptions;
    exports.resetOptions = gc_engine.resetOptions;
}

After (lines 640-653):
    exports._rewrite = gc_engine._rewrite;
    exports.ignoreRule = gc_engine.ignoreRule;
    exports.resetIgnoreRules = gc_engine.resetIgnoreRules;
    exports.reactivateRule = gc_engine.reactivateRule;
    exports.listRules = gc_engine.listRules;
    exports._getRules = gc_engine._getRules;
    exports.load = gc_engine.load;
    exports.getSpellChecker = gc_engine.getSpellChecker;
    exports.setOption = gc_engine.setOption;
    exports.setOptions = gc_engine.setOptions;
    exports.getOptions = gc_engine.getOptions;
    exports.getDefaultOptions = gc_engine.getDefaultOptions;
    exports.resetOptions = gc_engine.resetOptions;
}
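
Callers that previously fetched the raw IBDAWG through getDictionary() now get a SpellChecker object instead. A minimal usage sketch, assuming the engine and spellchecker.js are already loaded in the page (the worker further down does this with importScripts) and mirroring the load() call made there; the test words are only examples:

gc_engine.load("Firefox", "grammalecte/graphspell/_dictionaries");   // context + path to the packaged dictionaries
let oSpellChecker = gc_engine.getSpellChecker();                     // formerly gc_engine.getDictionary()
console.log(oSpellChecker.isValid("exemple"));                       // spelling test used by the suggestion helpers
console.log(oSpellChecker.getMorph("exemple"));                      // morphologies consumed by the grammar rules
for (let aSugg of oSpellChecker.suggest("exmple")) {                 // apparently one list per loaded dictionary
    console.log(aSugg);
}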

Modified gc_core/py/lang_core/gc_engine.py from [9fc11201d4] to [eded256052].

Before (lines 1-44):
# Grammalecte
# Grammar checker engine

import re
import sys
import os
import traceback
#import unicodedata
from itertools import chain

from ..graphspell.ibdawg import IBDAWG
from ..graphspell.echo import echo
from . import gc_options


__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
            "load", "parse", "getDictionary", \
            "setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
            "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules" ]

__version__ = "${version}"


lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"

_rules = None                               # module gc_rules

# data
_sAppContext = ""                           # what software is running
_dOptions = None
_aIgnoredRules = set()
_oDict = None
_dAnalyses = {}                             # cache for data from dictionary



#### Parsing

def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False):

After (lines 1-44):
# Grammalecte
# Grammar checker engine

import re
import sys
import os
import traceback
#import unicodedata
from itertools import chain

from ..graphspell.spellchecker import SpellChecker
from ..graphspell.echo import echo
from . import gc_options


__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
            "load", "parse", "getSpellChecker", \
            "setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
            "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules" ]

__version__ = "${version}"


lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"

_rules = None                               # module gc_rules

# data
_sAppContext = ""                           # what software is running
_dOptions = None
_aIgnoredRules = set()
_oSpellChecker = None
_dAnalyses = {}                             # cache for data from dictionary



#### Parsing

def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False):

Before (lines 284-302):
    #import lightproof_handler_${implname} as opt
    _createError = _createWriterError
except ImportError:
    _createError = _createDictError


def load (sContext="Python"):
    global _oDict
    global _sAppContext
    global _dOptions
    try:
        _oDict = IBDAWG("${dic_filename}.bdic")
        _sAppContext = sContext
        _dOptions = dict(gc_options.getOptions(sContext))   # duplication necessary, to be able to reset to default
    except:
        traceback.print_exc()


def setOption (sOpt, bVal):

After (lines 284-302):
    #import lightproof_handler_${implname} as opt
    _createError = _createWriterError
except ImportError:
    _createError = _createDictError


def load (sContext="Python"):
    global _oSpellChecker
    global _sAppContext
    global _dOptions
    try:
        _oSpellChecker = SpellChecker("${lang}", "${dic_filename}.bdic")
        _sAppContext = sContext
        _dOptions = dict(gc_options.getOptions(sContext))   # duplication necessary, to be able to reset to default
    except:
        traceback.print_exc()


def setOption (sOpt, bVal):

Before (lines 329-344):


def resetOptions ():
    global _dOptions
    _dOptions = dict(gc_options.getOptions(_sAppContext))


def getDictionary ():
    return _oDict


def _getRules (bParagraph):
    try:
        if not bParagraph:
            return _rules.lSentenceRules
        return _rules.lParagraphRules

After (lines 329-344):


def resetOptions ():
    global _dOptions
    _dOptions = dict(gc_options.getOptions(_sAppContext))


def getSpellChecker ():
    return _oSpellChecker


def _getRules (bParagraph):
    try:
        if not bParagraph:
            return _rules.lSentenceRules
        return _rules.lParagraphRules

Before (lines 394-410):
    if tWord[0] in dDA:
        echo("DA: " + str(dDA[tWord[0]]))
    echo("FSA: " + str(_dAnalyses[tWord[1]]))
    return True


def _storeMorphFromFSA (sWord):
    "retrieves morphologies list from _oDict -> _dAnalyses"
    global _dAnalyses
    _dAnalyses[sWord] = _oDict.getMorph(sWord)
    return True  if _dAnalyses[sWord]  else False


def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
    "analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
    if not tWord:
        return bNoWord

After (lines 394-410):
    if tWord[0] in dDA:
        echo("DA: " + str(dDA[tWord[0]]))
    echo("FSA: " + str(_dAnalyses[tWord[1]]))
    return True


def _storeMorphFromFSA (sWord):
    "retrieves morphologies list from _oSpellChecker -> _dAnalyses"
    global _dAnalyses
    _dAnalyses[sWord] = _oSpellChecker.getMorph(sWord)
    return True  if _dAnalyses[sWord]  else False


def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
    "analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
    if not tWord:
        return bNoWord

Modified gc_lang/fr/modules-js/gce_suggestions.js from [d955978cbc] to [0c31bc1a27].

Before (lines 206-257):
        } else if (sGender == ":f") {
            return suggFemPlur(sFlex);
        }
    }
    let aSugg = new Set();
    if (!sFlex.includes("-")) {
        if (sFlex.endsWith("l")) {
            if (sFlex.endsWith("al") && sFlex.length > 2 && _oDict.isValid(sFlex.slice(0,-1)+"ux")) {
                aSugg.add(sFlex.slice(0,-1)+"ux");
            }
            if (sFlex.endsWith("ail") && sFlex.length > 3 && _oDict.isValid(sFlex.slice(0,-2)+"ux")) {
                aSugg.add(sFlex.slice(0,-2)+"ux");
            }
        }
        if (_oDict.isValid(sFlex+"s")) {
            aSugg.add(sFlex+"s");
        }
        if (_oDict.isValid(sFlex+"x")) {
            aSugg.add(sFlex+"x");
        }
    }
    if (mfsp.hasMiscPlural(sFlex)) {
        mfsp.getMiscPlural(sFlex).forEach(function(x) { aSugg.add(x); });
    }
    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

function suggSing (sFlex) {
    // returns singular forms assuming sFlex is plural
    if (sFlex.includes("-")) {
        return "";
    }
    let aSugg = new Set();
    if (sFlex.endsWith("ux")) {
        if (_oDict.isValid(sFlex.slice(0,-2)+"l")) {
            aSugg.add(sFlex.slice(0,-2)+"l");
        }
        if (_oDict.isValid(sFlex.slice(0,-2)+"il")) {
            aSugg.add(sFlex.slice(0,-2)+"il");
        }
    }
    if (_oDict.isValid(sFlex.slice(0,-1))) {
        aSugg.add(sFlex.slice(0,-1));
    }
    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

After (lines 206-257):
        } else if (sGender == ":f") {
            return suggFemPlur(sFlex);
        }
    }
    let aSugg = new Set();
    if (!sFlex.includes("-")) {
        if (sFlex.endsWith("l")) {
            if (sFlex.endsWith("al") && sFlex.length > 2 && _oSpellChecker.isValid(sFlex.slice(0,-1)+"ux")) {
                aSugg.add(sFlex.slice(0,-1)+"ux");
            }
            if (sFlex.endsWith("ail") && sFlex.length > 3 && _oSpellChecker.isValid(sFlex.slice(0,-2)+"ux")) {
                aSugg.add(sFlex.slice(0,-2)+"ux");
            }
        }
        if (_oSpellChecker.isValid(sFlex+"s")) {
            aSugg.add(sFlex+"s");
        }
        if (_oSpellChecker.isValid(sFlex+"x")) {
            aSugg.add(sFlex+"x");
        }
    }
    if (mfsp.hasMiscPlural(sFlex)) {
        mfsp.getMiscPlural(sFlex).forEach(function(x) { aSugg.add(x); });
    }
    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

function suggSing (sFlex) {
    // returns singular forms assuming sFlex is plural
    if (sFlex.includes("-")) {
        return "";
    }
    let aSugg = new Set();
    if (sFlex.endsWith("ux")) {
        if (_oSpellChecker.isValid(sFlex.slice(0,-2)+"l")) {
            aSugg.add(sFlex.slice(0,-2)+"l");
        }
        if (_oSpellChecker.isValid(sFlex.slice(0,-2)+"il")) {
            aSugg.add(sFlex.slice(0,-2)+"il");
        }
    }
    if (_oSpellChecker.isValid(sFlex.slice(0,-1))) {
        aSugg.add(sFlex.slice(0,-1));
    }
    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}
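
Every suggestion helper above follows the same pattern: derive candidate inflections from sFlex, then keep only the forms the spell checker accepts. A compact sketch of that pattern, assuming _oSpellChecker has been initialised by gc_engine.load(); the helper name and the sample words are illustrative only:

function keepValidForms (lCandidates) {
    // keep only the candidate forms known to the (now multi-dictionary) spell checker
    return lCandidates.filter(sForm => _oSpellChecker.isValid(sForm));
}

// keepValidForms(["chevals", "chevaux"]) would normally keep only "chevaux"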

Modified gc_lang/fr/modules-js/lexicographe.js from [be510450a4] to [e3263a5103].

Before (lines 222-237):
    ['<', "inférieur à"],
    ['>', "supérieur à"],
]);


class Lexicographe {

    constructor (oDict, oTokenizer, oLocGraph) {
        this.oDict = oDict;
        this.oTokenizer = oTokenizer;
        this.oLocGraph = JSON.parse(oLocGraph);

        this._zPartDemForm = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(là|ci)$", "i");
        this._aPartDemExceptList = new Set(["celui", "celle", "ceux", "celles", "de", "jusque", "par", "marie-couche-toi"]);
        this._zInterroVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(t-(?:il|elle|on)|je|tu|ils?|elles?|on|[nv]ous)$", "i");
        this._zImperatifVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|y|en|[mts][’'](?:y|en)|les?|la|[mt]oi|leur|lui)$", "i");

After (lines 222-237):
    ['<', "inférieur à"],
    ['>', "supérieur à"],
]);


class Lexicographe {

    constructor (oSpellChecker, oTokenizer, oLocGraph) {
        this.oSpellChecker = oSpellChecker;
        this.oTokenizer = oTokenizer;
        this.oLocGraph = JSON.parse(oLocGraph);

        this._zPartDemForm = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(là|ci)$", "i");
        this._aPartDemExceptList = new Set(["celui", "celle", "ceux", "celles", "de", "jusque", "par", "marie-couche-toi"]);
        this._zInterroVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(t-(?:il|elle|on)|je|tu|ils?|elles?|on|[nv]ous)$", "i");
        this._zImperatifVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|y|en|[mts][’'](?:y|en)|les?|la|[mt]oi|leur|lui)$", "i");

Before (lines 337-351):
                            sValue: oToken.sValue,
                            aLabel: ["forme verbale interrogative"],
                            aSubElem: [
                                { sType: oToken.sType, sValue: m[1],       aLabel: this._getMorph(m[1]) },
                                { sType: oToken.sType, sValue: "-" + m[2], aLabel: [this._formatSuffix(m[2].toLowerCase())] }
                            ]
                        };
                    } else if (this.oDict.isValidToken(oToken.sValue)) {
                        return {
                            sType: oToken.sType,
                            sValue: oToken.sValue,
                            aLabel: this._getMorph(oToken.sValue)
                        };
                    } else {
                        return {

After (lines 337-351):
                            sValue: oToken.sValue,
                            aLabel: ["forme verbale interrogative"],
                            aSubElem: [
                                { sType: oToken.sType, sValue: m[1],       aLabel: this._getMorph(m[1]) },
                                { sType: oToken.sType, sValue: "-" + m[2], aLabel: [this._formatSuffix(m[2].toLowerCase())] }
                            ]
                        };
                    } else if (this.oSpellChecker.isValidToken(oToken.sValue)) {
                        return {
                            sType: oToken.sType,
                            sValue: oToken.sValue,
                            aLabel: this._getMorph(oToken.sValue)
                        };
                    } else {
                        return {

Before (lines 360-374):
            helpers.logerror(e);
        }
        return null;
    }

    _getMorph (sWord) {
        let aElem = [];
        for (let s of this.oDict.getMorph(sWord)) {
            if (s.includes(":")) aElem.push(this._formatTags(s));
        }
        if (aElem.length == 0) {
            aElem.push("mot inconnu du dictionnaire");
        }
        return aElem;
    }

After (lines 360-374):
            helpers.logerror(e);
        }
        return null;
    }

    _getMorph (sWord) {
        let aElem = [];
        for (let s of this.oSpellChecker.getMorph(sWord)) {
            if (s.includes(":")) aElem.push(this._formatTags(s));
        }
        if (aElem.length == 0) {
            aElem.push("mot inconnu du dictionnaire");
        }
        return aElem;
    }
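
The lexicographer no longer carries its own dictionary object: morphologies are read straight from the spell checker. A minimal wiring sketch, assuming the modules are loaded as in gce_worker.js below and that sLocutions holds the raw content of locutions_data.json:

let oSpellChecker = gc_engine.getSpellChecker();
let oTokenizer = new Tokenizer("fr");
let oLxg = new Lexicographe(oSpellChecker, oTokenizer, sLocutions);   // was: new Lexicographe(oDict, oTokenizer, oLocGraph)
// per-word tag labels now boil down to:
let aTags = oSpellChecker.getMorph("exemple").filter(s => s.includes(":"));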

Modified gc_lang/fr/modules/gce_suggestions.py from [50fbeb414d] to [79835965e4].

Before (lines 155-194):
        if sGender == ":m":
            return suggMasPlur(sFlex)
        elif sGender == ":f":
            return suggFemPlur(sFlex)
    aSugg = set()
    if "-" not in sFlex:
        if sFlex.endswith("l"):
            if sFlex.endswith("al") and len(sFlex) > 2 and _oDict.isValid(sFlex[:-1]+"ux"):
                aSugg.add(sFlex[:-1]+"ux")
            if sFlex.endswith("ail") and len(sFlex) > 3 and _oDict.isValid(sFlex[:-2]+"ux"):
                aSugg.add(sFlex[:-2]+"ux")
        if _oDict.isValid(sFlex+"s"):
            aSugg.add(sFlex+"s")
        if _oDict.isValid(sFlex+"x"):
            aSugg.add(sFlex+"x")
    if mfsp.hasMiscPlural(sFlex):
        aSugg.update(mfsp.getMiscPlural(sFlex))
    if aSugg:
        return "|".join(aSugg)
    return ""


def suggSing (sFlex):
    "returns singular forms assuming sFlex is plural"
    if "-" in sFlex:
        return ""
    aSugg = set()
    if sFlex.endswith("ux"):
        if _oDict.isValid(sFlex[:-2]+"l"):
            aSugg.add(sFlex[:-2]+"l")
        if _oDict.isValid(sFlex[:-2]+"il"):
            aSugg.add(sFlex[:-2]+"il")
    if _oDict.isValid(sFlex[:-1]):
        aSugg.add(sFlex[:-1])
    if aSugg:
        return "|".join(aSugg)
    return ""


def suggMasSing (sFlex, bSuggSimil=False):

After (lines 155-194):
        if sGender == ":m":
            return suggMasPlur(sFlex)
        elif sGender == ":f":
            return suggFemPlur(sFlex)
    aSugg = set()
    if "-" not in sFlex:
        if sFlex.endswith("l"):
            if sFlex.endswith("al") and len(sFlex) > 2 and _oSpellChecker.isValid(sFlex[:-1]+"ux"):
                aSugg.add(sFlex[:-1]+"ux")
            if sFlex.endswith("ail") and len(sFlex) > 3 and _oSpellChecker.isValid(sFlex[:-2]+"ux"):
                aSugg.add(sFlex[:-2]+"ux")
        if _oSpellChecker.isValid(sFlex+"s"):
            aSugg.add(sFlex+"s")
        if _oSpellChecker.isValid(sFlex+"x"):
            aSugg.add(sFlex+"x")
    if mfsp.hasMiscPlural(sFlex):
        aSugg.update(mfsp.getMiscPlural(sFlex))
    if aSugg:
        return "|".join(aSugg)
    return ""


def suggSing (sFlex):
    "returns singular forms assuming sFlex is plural"
    if "-" in sFlex:
        return ""
    aSugg = set()
    if sFlex.endswith("ux"):
        if _oSpellChecker.isValid(sFlex[:-2]+"l"):
            aSugg.add(sFlex[:-2]+"l")
        if _oSpellChecker.isValid(sFlex[:-2]+"il"):
            aSugg.add(sFlex[:-2]+"il")
    if _oSpellChecker.isValid(sFlex[:-1]):
        aSugg.add(sFlex[:-1])
    if aSugg:
        return "|".join(aSugg)
    return ""


def suggMasSing (sFlex, bSuggSimil=False):

Modified gc_lang/fr/modules/lexicographe.py from [7b36598d08] to [75ede82f17].

Before (lines 153-168):
    "t'en": " (te) pronom personnel objet + (en) pronom adverbial",
    "s'en": " (se) pronom personnel objet + (en) pronom adverbial",
}


class Lexicographe:

    def __init__ (self, oDict):
        self.oDict = oDict
        self._zElidedPrefix = re.compile("(?i)^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)")
        self._zCompoundWord = re.compile("(?i)(\\w+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$")
        self._zTag = re.compile("[:;/][\\w*][^:;/]*")

    def analyzeWord (self, sWord):
        try:
            if not sWord:

After (lines 153-168):
    "t'en": " (te) pronom personnel objet + (en) pronom adverbial",
    "s'en": " (se) pronom personnel objet + (en) pronom adverbial",
}


class Lexicographe:

    def __init__ (self, oSpellChecker):
        self.oSpellChecker = oSpellChecker
        self._zElidedPrefix = re.compile("(?i)^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)")
        self._zCompoundWord = re.compile("(?i)(\\w+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$")
        self._zTag = re.compile("[:;/][\\w*][^:;/]*")

    def analyzeWord (self, sWord):
        try:
            if not sWord:

Before (lines 179-193):
                sWord = m.group(2)
                aMorph.append( "{}’ : {}".format(m.group(1), _dPFX.get(m.group(1).lower(), "[?]")) )
            # mots composés
            m2 = self._zCompoundWord.match(sWord)
            if m2:
                sWord = m2.group(1)
            # Morphologies
            lMorph = self.oDict.getMorph(sWord)
            if len(lMorph) > 1:
                # sublist
                aMorph.append( (sWord, [ self.formatTags(s)  for s in lMorph  if ":" in s ]) )
            elif len(lMorph) == 1:
                aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
            else:
                aMorph.append( "{} :  inconnu du dictionnaire".format(sWord) )

After (lines 179-193):
                sWord = m.group(2)
                aMorph.append( "{}’ : {}".format(m.group(1), _dPFX.get(m.group(1).lower(), "[?]")) )
            # mots composés
            m2 = self._zCompoundWord.match(sWord)
            if m2:
                sWord = m2.group(1)
            # Morphologies
            lMorph = self.oSpellChecker.getMorph(sWord)
            if len(lMorph) > 1:
                # sublist
                aMorph.append( (sWord, [ self.formatTags(s)  for s in lMorph  if ":" in s ]) )
            elif len(lMorph) == 1:
                aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
            else:
                aMorph.append( "{} :  inconnu du dictionnaire".format(sWord) )

Modified gc_lang/fr/webext/background.js from [f7965949be] to [1d15c00f44].

Before (lines 58-71):
                }
                browser.runtime.sendMessage(e.data);
                break;
            case "setOptions":
            case "setOption":
                storeGCOptions(result);
                break;



            default:
                console.log("[background] Unknown command: " + sActionDone);
                console.log(e.data);
        }
    }
    catch (e) {
        showError(e);

After (lines 58-74):
                }
                browser.runtime.sendMessage(e.data);
                break;
            case "setOptions":
            case "setOption":
                storeGCOptions(result);
                break;
            case "setDictionary":
                console.log("[background] " + sActionDone + ": " + result);
                break;
            default:
                console.log("[background] Unknown command: " + sActionDone);
                console.log(e.data);
        }
    }
    catch (e) {
        showError(e);

Before (lines 85-107):
    let dOptions = (dSavedOptions.hasOwnProperty("gc_options")) ? dSavedOptions.gc_options : null;
    xGCEWorker.postMessage({
        sCommand: "init",
        dParam: {sExtensionPath: browser.extension.getURL(""), dOptions: dOptions, sContext: "Firefox"},
        dInfo: {}
    });
}

function init () {
    if (bChrome) {
        browser.storage.local.get("gc_options", initGrammarChecker);
        browser.storage.local.get("ui_options", initUIOptions);


        return;
    }
    browser.storage.local.get("gc_options").then(initGrammarChecker, showError);
    browser.storage.local.get("ui_options").then(initUIOptions, showError);


}

init();


browser.runtime.onInstalled.addListener(function (oDetails) {
    // launched at installation or update

After (lines 88-131):
    let dOptions = (dSavedOptions.hasOwnProperty("gc_options")) ? dSavedOptions.gc_options : null;
    xGCEWorker.postMessage({
        sCommand: "init",
        dParam: {sExtensionPath: browser.extension.getURL(""), dOptions: dOptions, sContext: "Firefox"},
        dInfo: {}
    });
}

function setSpellingDictionary (dSavedDictionary) {
    if (dSavedDictionary.hasOwnProperty("oExtendedDictionary")) {
        xGCEWorker.postMessage({
            sCommand: "setDictionary",
            dParam: { sType: "extended", oDict: dSavedDictionary["oExtendedDictionary"] },
            dInfo: {}
        });
    }
    else if (dSavedDictionary.hasOwnProperty("oPersonalDictionary")) {
        xGCEWorker.postMessage({
            sCommand: "setDictionary",
            dParam: { sType: "personal", oDict: dSavedDictionary["oPersonalDictionary"] },
            dInfo: {}
        });
    }
}

function init () {
    if (bChrome) {
        browser.storage.local.get("gc_options", initGrammarChecker);
        browser.storage.local.get("ui_options", initUIOptions);
        browser.storage.local.get("oExtendedDictionary", setSpellingDictionary);
        browser.storage.local.get("oPersonalDictionary", setSpellingDictionary);
        return;
    }
    browser.storage.local.get("gc_options").then(initGrammarChecker, showError);
    browser.storage.local.get("ui_options").then(initUIOptions, showError);
    browser.storage.local.get("oExtendedDictionary").then(setSpellingDictionary, showError);
    browser.storage.local.get("oPersonalDictionary").then(setSpellingDictionary, showError);
}

init();


browser.runtime.onInstalled.addListener(function (oDetails) {
    // launched at installation or update

Before (lines 131-144):
        case "getOptions":
        case "getDefaultOptions":
        case "setOptions":
        case "setOption":
        case "resetOptions":
        case "textToTest":
        case "fullTests":

            xGCEWorker.postMessage(oRequest);
            break;
        case "openURL":
            browser.tabs.create({url: dParam.sURL});
            break;
        default:
            console.log("[background] Unknown command: " + sCommand);

After (lines 155-169):
        case "getOptions":
        case "getDefaultOptions":
        case "setOptions":
        case "setOption":
        case "resetOptions":
        case "textToTest":
        case "fullTests":
        case "setDictionary":
            xGCEWorker.postMessage(oRequest);
            break;
        case "openURL":
            browser.tabs.create({url: dParam.sURL});
            break;
        default:
            console.log("[background] Unknown command: " + sCommand);
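
The background script gains a "setDictionary" command: the worker's acknowledgement is logged (first hunk) and, in the request dispatcher, the command is simply forwarded to the grammar-checker worker (last hunk). Any extension page can therefore hand a freshly built dictionary to the engine with a single runtime message; a minimal sketch, where the function name is illustrative and oDictJSON stands for the JSON produced by the lexicon editor:

function publishPersonalDictionary (oDictJSON) {
    // forwarded by the dispatcher above to xGCEWorker as a "setDictionary" request
    browser.runtime.sendMessage({
        sCommand: "setDictionary",
        dParam: { sType: "personal", oDict: oDictJSON },   // "extended" is accepted as well
        dInfo: {}
    });
}

At startup, init() also reloads whatever was saved under the "oExtendedDictionary" and "oPersonalDictionary" storage keys and pushes it to the worker through setSpellingDictionary(), so a personal dictionary survives browser restarts.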

Modified gc_lang/fr/webext/gce_worker.js from [2a64cce6de] to [c20f81d8f3].

Before (lines 32-45):
//console.log("[Worker] GC Engine Worker [start]");
//console.log(self);

importScripts("grammalecte/graphspell/helpers.js");
importScripts("grammalecte/graphspell/str_transform.js");
importScripts("grammalecte/graphspell/char_player.js");
importScripts("grammalecte/graphspell/ibdawg.js");

importScripts("grammalecte/text.js");
importScripts("grammalecte/graphspell/tokenizer.js");
importScripts("grammalecte/fr/conj.js");
importScripts("grammalecte/fr/mfsp.js");
importScripts("grammalecte/fr/phonet.js");
importScripts("grammalecte/fr/cregex.js");
importScripts("grammalecte/fr/gc_options.js");

After (lines 32-46):
//console.log("[Worker] GC Engine Worker [start]");
//console.log(self);

importScripts("grammalecte/graphspell/helpers.js");
importScripts("grammalecte/graphspell/str_transform.js");
importScripts("grammalecte/graphspell/char_player.js");
importScripts("grammalecte/graphspell/ibdawg.js");
importScripts("grammalecte/graphspell/spellchecker.js");
importScripts("grammalecte/text.js");
importScripts("grammalecte/graphspell/tokenizer.js");
importScripts("grammalecte/fr/conj.js");
importScripts("grammalecte/fr/mfsp.js");
importScripts("grammalecte/fr/phonet.js");
importScripts("grammalecte/fr/cregex.js");
importScripts("grammalecte/fr/gc_options.js");

Before (lines 116-146):
            break;
        case "textToTest":
            textToTest(dParam.sText, dParam.sCountry, dParam.bDebug, dParam.bContext, dInfo);
            break;
        case "fullTests":
            fullTests(dInfo);
            break;



        case "getSpellSuggestions":
            getSpellSuggestions(dParam.sWord, dInfo);
            break;
        case "getListOfTokens":
            getListOfTokens(dParam.sText, dInfo);
            break;
        default:
            console.log("[Worker] Unknown command: " + sCommand);
            showData(e.data);
    }
}



let bInitDone = false;

let oDict = null;
let oTokenizer = null;
let oLxg = null;
let oTest = null;
let oLocution = null;


/*

After (lines 117-150):
            break;
        case "textToTest":
            textToTest(dParam.sText, dParam.sCountry, dParam.bDebug, dParam.bContext, dInfo);
            break;
        case "fullTests":
            fullTests(dInfo);
            break;
        case "setDictionary":
            setDictionary(dParam.sType, dParam.oDict, dInfo);
            break;
        case "getSpellSuggestions":
            getSpellSuggestions(dParam.sWord, dInfo);
            break;
        case "getListOfTokens":
            getListOfTokens(dParam.sText, dInfo);
            break;
        default:
            console.log("[Worker] Unknown command: " + sCommand);
            showData(e.data);
    }
}



let bInitDone = false;

let oSpellChecker = null;
let oTokenizer = null;
let oLxg = null;
let oTest = null;
let oLocution = null;


/*

Before (lines 157-177):
        if (!bInitDone) {
            //console.log("[Worker] Loading… Extension path: " + sExtensionPath);
            conj.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/conj_data.json"));
            phonet.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/phonet_data.json"));
            mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
            //console.log("[Worker] Modules have been initialized…");
            gc_engine.load(sContext, sExtensionPath+"grammalecte/graphspell/_dictionaries");
            oDict = gc_engine.getDictionary();
            oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
            oTokenizer = new Tokenizer("fr");

            oLocution =  helpers.loadFile(sExtensionPath + "/grammalecte/fr/locutions_data.json");

            oLxg = new Lexicographe(oDict, oTokenizer, oLocution);
            if (dOptions !== null) {
                gc_engine.setOptions(dOptions);
            }
            //tests();
            bInitDone = true;
        } else {
            console.log("[Worker] Already initialized…")

After (lines 161-181):
        if (!bInitDone) {
            //console.log("[Worker] Loading… Extension path: " + sExtensionPath);
            conj.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/conj_data.json"));
            phonet.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/phonet_data.json"));
            mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
            //console.log("[Worker] Modules have been initialized…");
            gc_engine.load(sContext, sExtensionPath+"grammalecte/graphspell/_dictionaries");
            oSpellChecker = gc_engine.getSpellChecker();
            oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
            oTokenizer = new Tokenizer("fr");

            oLocution =  helpers.loadFile(sExtensionPath + "/grammalecte/fr/locutions_data.json");

            oLxg = new Lexicographe(oSpellChecker, oTokenizer, oLocution);
            if (dOptions !== null) {
                gc_engine.setOptions(dOptions);
            }
            //tests();
            bInitDone = true;
        } else {
            console.log("[Worker] Already initialized…")

Before (lines 196-220):
}

function parseAndSpellcheck (sText, sCountry, bDebug, bContext, dInfo={}) {
    let i = 0;
    sText = sText.replace(/­/g, "").normalize("NFC");
    for (let sParagraph of text.getParagraph(sText)) {
        let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
        postMessage(createResponse("parseAndSpellcheck", {sParagraph: sParagraph, iParaNum: i, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, false));
        i += 1;
    }
    postMessage(createResponse("parseAndSpellcheck", null, dInfo, true));
}

function parseAndSpellcheck1 (sParagraph, sCountry, bDebug, bContext, dInfo={}) {
    sParagraph = sParagraph.replace(/­/g, "").normalize("NFC");
    let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
    postMessage(createResponse("parseAndSpellcheck1", {sParagraph: sParagraph, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, true));
}

function getOptions (dInfo={}) {
    postMessage(createResponse("getOptions", gc_engine.getOptions(), dInfo, true));
}


After (lines 200-224):
}

function parseAndSpellcheck (sText, sCountry, bDebug, bContext, dInfo={}) {
    let i = 0;
    sText = sText.replace(/­/g, "").normalize("NFC");
    for (let sParagraph of text.getParagraph(sText)) {
        let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
        postMessage(createResponse("parseAndSpellcheck", {sParagraph: sParagraph, iParaNum: i, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, false));
        i += 1;
    }
    postMessage(createResponse("parseAndSpellcheck", null, dInfo, true));
}

function parseAndSpellcheck1 (sParagraph, sCountry, bDebug, bContext, dInfo={}) {
    sParagraph = sParagraph.replace(/­/g, "").normalize("NFC");
    let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
    postMessage(createResponse("parseAndSpellcheck1", {sParagraph: sParagraph, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, true));
}

function getOptions (dInfo={}) {
    postMessage(createResponse("getOptions", gc_engine.getOptions(), dInfo, true));
}


Before (lines 284-306):
        console.log(sRes);
    }
    gc_engine.setOptions(dMemoOptions);
    postMessage(createResponse("fullTests", sMsg, dInfo, true));
}


// Spellchecker

function getSpellSuggestions (sWord, dInfo) {
    if (!oDict) {
        postMessage(createResponse("getSpellSuggestions", "# Error. Dictionary not loaded.", dInfo, true));
        return;
    }

    let aSugg = oDict.suggest(sWord);
    postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg}, dInfo, true));


}


// Lexicographer

function getListOfTokens (sText, dInfo={}) {
    try {

After (lines 288-336):
        console.log(sRes);
    }
    gc_engine.setOptions(dMemoOptions);
    postMessage(createResponse("fullTests", sMsg, dInfo, true));
}


// SpellChecker

function setDictionary (sType, oDict, dInfo) {
    if (!oSpellChecker) {
        postMessage(createResponse("setDictionary", "# Error. SpellChecker not loaded.", dInfo, true));
        return;
    }
    switch (sType) {
        case "main":
            oSpellChecker.setMainDictionary(oDict);
            postMessage(createResponse("setDictionary", true, dInfo, true));
            break;
        case "extended":
            oSpellChecker.setExtendedDictionary(oDict);
            postMessage(createResponse("setDictionary", true, dInfo, true));
            break;
        case "personal":
            oSpellChecker.setPersonalDictionary(oDict);
            postMessage(createResponse("setDictionary", true, dInfo, true));
            break;
        default:
            console.log("[worker] setDictionary: Unknown command");
    }
}

function getSpellSuggestions (sWord, dInfo) {
    if (!oSpellChecker) {
        postMessage(createResponse("getSpellSuggestions", "# Error. SpellChecker not loaded.", dInfo, true));
        return;
    }
    let i = 1;
    for (let aSugg of oSpellChecker.suggest(sWord)) {
        postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg, iSugg: i}, dInfo, true));
        i += 1;
    }
}


// Lexicographer

function getListOfTokens (sText, dInfo={}) {
    try {

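
Inside the worker, the new "setDictionary" command routes the received JSON to setMainDictionary(), setExtendedDictionary() or setPersonalDictionary() on the SpellChecker, and getSpellSuggestions() now posts one response per suggestion list yielded by oSpellChecker.suggest() (with an iSugg counter) instead of a single list. A minimal sketch of driving the worker from a page, assuming it has already been initialised with the "init" command and that the worker path is valid in that context; variable names are illustrative:

let xGCEWorker = new Worker("gce_worker.js");
xGCEWorker.onmessage = function (e) {
    // each answer is an envelope built by createResponse(); getSpellSuggestions replies
    // carry {sWord, aSugg, iSugg}, so several messages may arrive for one request
    console.log(e.data);
};
// with a personal dictionary installed via "setDictionary", suggestions arrive as a stream of lists
xGCEWorker.postMessage({ sCommand: "getSpellSuggestions", dParam: { sWord: "exmple" }, dInfo: {} });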
Modified gc_lang/fr/webext/panel/lex_editor.js from [629a2b4b0e] to [2142dc301a].

Before (lines 568-618):

const oBinaryDict = {
    
    oIBDAWG: null,

    load: function () {
        if (bChrome) {
            browser.storage.local.get("oDictionary", this._load);
            return;
        }
        let xPromise = browser.storage.local.get("oDictionary");
        xPromise.then(this._load.bind(this), showError);
    },

    _load: function (oResult) {
        if (!oResult.hasOwnProperty("oDictionary")) {
            oWidgets.hideElement("export_button");
            return;
        }
        let oJSON = oResult.oDictionary;
        this.oIBDAWG = new IBDAWG(oJSON);
        let lEntry = [];
        for (let s of this.oIBDAWG.select()) {
            lEntry.push(s.split("\t"));
        }        
        oLexicon.set(lEntry);
        oWidgets.setDictData(this.oIBDAWG.nEntry, this.oIBDAWG.sDate);
        oWidgets.showElement("export_button");
    },

    build: function (lEntry) {
        oWidgets.showElement("build_progress");
        let xProgressNode = document.getElementById("build_progress");
        let oDAWG = new DAWG(lEntry, "S", "fr", "Français", "Dictionnaire personnel", xProgressNode);
        let oJSON = oDAWG.createBinaryJSON(1);
        this.save(oJSON);
        this.oIBDAWG = new IBDAWG(oJSON);
        oWidgets.setDictData(this.oIBDAWG.nEntry, this.oIBDAWG.sDate);
        oWidgets.hideElement("build_progress");
        oWidgets.showElement("export_button");

    },

    save: function (oJSON) {
        browser.storage.local.set({ "oDictionary": oJSON });
    },

    import: function () {
        // TO DO
    },

    export: function () {

After (lines 568-619):

const oBinaryDict = {
    
    oIBDAWG: null,

    load: function () {
        if (bChrome) {
            browser.storage.local.get("oPersonalDictionary", this._load);
            return;
        }
        let xPromise = browser.storage.local.get("oPersonalDictionary");
        xPromise.then(this._load.bind(this), showError);
    },

    _load: function (oResult) {
        if (!oResult.hasOwnProperty("oPersonalDictionary")) {
            oWidgets.hideElement("export_button");
            return;
        }
        let oJSON = oResult.oPersonalDictionary;
        this.oIBDAWG = new IBDAWG(oJSON);
        let lEntry = [];
        for (let s of this.oIBDAWG.select()) {
            lEntry.push(s.split("\t"));
        }        
        oLexicon.set(lEntry);
        oWidgets.setDictData(this.oIBDAWG.nEntry, this.oIBDAWG.sDate);
        oWidgets.showElement("export_button");
    },

    build: function (lEntry) {
        oWidgets.showElement("build_progress");
        let xProgressNode = document.getElementById("build_progress");
        let oDAWG = new DAWG(lEntry, "S", "fr", "Français", "Dictionnaire personnel", xProgressNode);
        let oJSON = oDAWG.createBinaryJSON(1);
        this.save(oJSON);
        this.oIBDAWG = new IBDAWG(oJSON);
        oWidgets.setDictData(this.oIBDAWG.nEntry, this.oIBDAWG.sDate);
        oWidgets.hideElement("build_progress");
        oWidgets.showElement("export_button");
        browser.runtime.sendMessage({ sCommand: "setDictionary", dParam: {sType: "personal", oDict: oJSON}, dInfo: {} });
    },

    save: function (oJSON) {
        browser.storage.local.set({ "oPersonalDictionary": oJSON });
    },

    import: function () {
        // TO DO
    },

    export: function () {

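
The lexicon editor now keeps its binary dictionary under the "oPersonalDictionary" storage key (instead of "oDictionary") and, after a rebuild, hands it to the engine through the new "setDictionary" command shown in build() above. A condensed sketch of that build-and-publish path; the sample entries, the tag strings and the null progress node are assumptions for illustration (entries are the tab-separated fields returned by IBDAWG.select()):

let lEntry = [ ["kiwaï", "kiwaï", ":N:m:s"], ["kiwaïs", "kiwaï", ":N:m:p"] ];            // assumed field order: flexion, lemma, tags
let oDAWG = new DAWG(lEntry, "S", "fr", "Français", "Dictionnaire personnel", null);     // null: no progress bar (assumption)
let oJSON = oDAWG.createBinaryJSON(1);
browser.storage.local.set({ "oPersonalDictionary": oJSON });                             // reloaded by background.js at next startup
// then browser.runtime.sendMessage({ sCommand: "setDictionary", … }) as in build() makes it effective immediately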
Modified grammalecte-cli.py from [07800caa2b] to [d66b04b565].

Before (lines 40-74):
    if sys.platform == "win32":
        # Apparently, the console transforms «’» in «'».
        # So we reverse it to avoid many useless warnings.
        sText = sText.replace("'", "’")
    return sText


def _getErrors (sText, oTokenizer, oDict, bContext=False, bSpellSugg=False, bDebug=False):
    "returns a tuple: (grammar errors, spelling errors)"
    aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
            if bSpellSugg:
                dToken['aSuggestions'] = oDict.suggest(dToken['sValue'])


            aSpellErrs.append(dToken)
    return aGrammErrs, aSpellErrs


def generateText (sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bSpellSugg, bDebug)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)


def generateJSON (iIndex, sText, oTokenizer, oDict, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, bContext, bSpellSugg, bDebug)
    aGrammErrs = list(aGrammErrs)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    if lLineSet:
        aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
        return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    if bReturnText:

After (lines 40-76):
    if sys.platform == "win32":
        # Apparently, the console transforms «’» in «'».
        # So we reverse it to avoid many useless warnings.
        sText = sText.replace("'", "’")
    return sText


def _getErrors (sText, oTokenizer, oSpellChecker, bContext=False, bSpellSugg=False, bDebug=False):
    "returns a tuple: (grammar errors, spelling errors)"
    aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
            if bSpellSugg:
                dToken['aSuggestions'] = []
                for lSugg in oSpellChecker.suggest(dToken['sValue']):
                    dToken['aSuggestions'].extend(lSugg)
            aSpellErrs.append(dToken)
    return aGrammErrs, aSpellErrs


def generateText (sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, False, bSpellSugg, bDebug)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)


def generateJSON (iIndex, sText, oTokenizer, oSpellChecker, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, bContext, bSpellSugg, bDebug)
    aGrammErrs = list(aGrammErrs)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    if lLineSet:
        aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
        return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    if bReturnText:

Before (lines 126-159):
    xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
    xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
    xArgs = xParser.parse_args()

    gce.load()
    if not xArgs.json:
        echo("Grammalecte v{}".format(gce.version))
    oDict = gce.getDictionary()
    oTokenizer = tkz.Tokenizer("fr")
    oLexGraphe = lxg.Lexicographe(oDict)
    if xArgs.textformatter or xArgs.textformatteronly:
        oTF = tf.TextFormatter()

    if xArgs.list_options or xArgs.list_rules:
        if xArgs.list_options:
            gce.displayOptions("fr")
        if xArgs.list_rules:
            gce.displayRules(None  if xArgs.list_rules == "*"  else xArgs.list_rules)
        exit()

    if xArgs.suggest:
        lSugg = oDict.suggest(xArgs.suggest)
        if xArgs.json:
            sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
        else:
            sText = "Suggestions : " + " | ".join(lSugg)
        echo(sText)
        exit()

    if not xArgs.json:
        xArgs.context = False

    gce.setOptions({"html": True, "latex": True})
    if xArgs.opt_on:

After (lines 128-161):
    xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
    xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
    xArgs = xParser.parse_args()

    gce.load()
    if not xArgs.json:
        echo("Grammalecte v{}".format(gce.version))
    oSpellChecker = gce.getSpellChecker()
    oTokenizer = tkz.Tokenizer("fr")
    oLexGraphe = lxg.Lexicographe(oSpellChecker)
    if xArgs.textformatter or xArgs.textformatteronly:
        oTF = tf.TextFormatter()

    if xArgs.list_options or xArgs.list_rules:
        if xArgs.list_options:
            gce.displayOptions("fr")
        if xArgs.list_rules:
            gce.displayRules(None  if xArgs.list_rules == "*"  else xArgs.list_rules)
        exit()

    if xArgs.suggest:
        for lSugg in oSpellChecker.suggest(xArgs.suggest):
            if xArgs.json:
                sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
            else:
                sText = "Suggestions : " + " | ".join(lSugg)
            echo(sText)
        exit()

    if not xArgs.json:
        xArgs.context = False

    gce.setOptions({"html": True, "latex": True})
    if xArgs.opt_on:

Before (lines 177-237):
            for i, sText in enumerate(readfile(sFile), 1):
                if xArgs.textformatter or xArgs.textformatteronly:
                    sText = oTF.formatText(sText)
                if xArgs.textformatteronly:
                    output(sText, hDst)
                else:
                    if xArgs.json:
                        sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
                    else:
                        sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                    if sText:
                        if xArgs.json and bComma:
                            output(",\n", hDst)
                        output(sText, hDst)
                        bComma = True
                if hDst:
                    echo("§ %d\r" % i, end="", flush=True)
        else:
            # concaténation des lignes non séparées par une ligne vide
            for i, lLine in enumerate(readfileAndConcatLines(sFile), 1):
                sText, lLineSet = txt.createParagraphWithLines(lLine)
                if xArgs.json:
                    sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
                else:
                    sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                if sText:
                    if xArgs.json and bComma:
                        output(",\n", hDst)
                    output(sText, hDst)
                    bComma = True
                if hDst:
                    echo("§ %d\r" % i, end="", flush=True)
        if xArgs.json:
            output("\n]}\n", hDst)
    else:
        # pseudo-console
        sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n"
        sText = _getText(sInputText)
        while True:
            if sText.startswith("?"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo("* " + sWord)
                        for sMorph in oDict.getMorph(sWord):
                            echo("  {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))
            elif sText.startswith("!"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo(" | ".join(oDict.suggest(sWord)))
                        #echo(" | ".join(oDict.suggest2(sWord)))
            elif sText.startswith(">"):
                oDict.drawPath(sText[1:].strip())
            elif sText.startswith("="):
                for sRes in oDict.select(sText[1:].strip()):
                    echo(sRes)
            elif sText.startswith("/+ "):
                gce.setOptions({ opt:True  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                echo("done")
            elif sText.startswith("/- "):
                gce.setOptions({ opt:False  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                echo("done")







|

|












|

|


















|




|
|

|

|







179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
            for i, sText in enumerate(readfile(sFile), 1):
                if xArgs.textformatter or xArgs.textformatteronly:
                    sText = oTF.formatText(sText)
                if xArgs.textformatteronly:
                    output(sText, hDst)
                else:
                    if xArgs.json:
                        sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
                    else:
                        sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                    if sText:
                        if xArgs.json and bComma:
                            output(",\n", hDst)
                        output(sText, hDst)
                        bComma = True
                if hDst:
                    echo("§ %d\r" % i, end="", flush=True)
        else:
            # concaténation des lignes non séparées par une ligne vide
            for i, lLine in enumerate(readfileAndConcatLines(sFile), 1):
                sText, lLineSet = txt.createParagraphWithLines(lLine)
                if xArgs.json:
                    sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
                else:
                    sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                if sText:
                    if xArgs.json and bComma:
                        output(",\n", hDst)
                    output(sText, hDst)
                    bComma = True
                if hDst:
                    echo("§ %d\r" % i, end="", flush=True)
        if xArgs.json:
            output("\n]}\n", hDst)
    else:
        # pseudo-console
        sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n"
        sText = _getText(sInputText)
        while True:
            if sText.startswith("?"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo("* " + sWord)
                        for sMorph in oSpellChecker.getMorph(sWord):
                            echo("  {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))
            elif sText.startswith("!"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        for lSugg in oSpellChecker.suggest(sWord):
                            echo(" | ".join(lSugg))
            elif sText.startswith(">"):
                oSpellChecker.drawPath(sText[1:].strip())
            elif sText.startswith("="):
                for sRes in oSpellChecker.select(sText[1:].strip()):
                    echo(sRes)
            elif sText.startswith("/+ "):
                gce.setOptions({ opt:True  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                echo("done")
            elif sText.startswith("/- "):
                gce.setOptions({ opt:False  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                echo("done")
            elif sText.startswith("/rl"):
                # reload (todo)
                pass
            else:
                for sParagraph in txt.getParagraph(sText):
                    if xArgs.textformatter:
                        sText = oTF.formatText(sText)
                    sRes = generateText(sText, oTokenizer, oDict, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
                    if sRes:
                        echo("\n" + sRes)
                    else:
                        echo("\nNo error found.")
            sText = _getText(sInputText)


if __name__ == '__main__':
    main()
            elif sText.startswith("/rl"):
                # reload (todo)
                pass
            else:
                for sParagraph in txt.getParagraph(sText):
                    if xArgs.textformatter:
                        sText = oTF.formatText(sText)
                    sRes = generateText(sText, oTokenizer, oSpellChecker, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
                    if sRes:
                        echo("\n" + sRes)
                    else:
                        echo("\nNo error found.")
            sText = _getText(sInputText)


if __name__ == '__main__':
    main()
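
Note on the suggestion change above: the old CLI called oDict.suggest() and got back a single list, whereas oSpellChecker.suggest() is a generator that yields one list per loaded dictionary (main, extended, personal), hence the new "for lSugg in oSpellChecker.suggest(...)" loop. A minimal sketch of how a caller could flatten those lists into one deduplicated result, assuming only the generator shape shown in graphspell/spellchecker.py below (flatten_suggestions and fakeSuggest are illustrative names, not part of Grammalecte):

import json

def flatten_suggestions (gen):
    "merge the one-to-three suggestion lists yielded by SpellChecker.suggest() into a single ordered list"
    lAll = []
    for lSugg in gen:                  # one list per loaded dictionary
        for sSugg in lSugg:
            if sSugg not in lAll:      # keep the first occurrence, drop duplicates
                lAll.append(sSugg)
    return lAll

# stand-in generator with the same shape as SpellChecker.suggest(); not the real class
def fakeSuggest (sWord, nSuggLimit=10):
    yield ["maïs", "mais"]             # main dictionary
    yield ["mais"]                     # personal dictionary (duplicate dropped above)

print(json.dumps({ "aSuggestions": flatten_suggestions(fakeSuggest("mais")) }, ensure_ascii=False))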

Modified grammalecte-server.py from [6dbdf10c60] to [3253a2b2ff].

def genUserId ():
    i = 0
    while True:
        yield str(i)
        i += 1


def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False):
    aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions)
    aGrammErrs = list(aGrammErrs)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
            aSpellErrs.append(dToken)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return "  " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    

if __name__ == '__main__':

    gce.load("Server")
    echo("Grammalecte v{}".format(gce.version))
    dServerOptions = getServerOptions()
    dGCOptions = getConfigOptions("fr")
    if dGCOptions:
        gce.setOptions(dGCOptions)
    dServerGCOptions = gce.getOptions()
    echo("Grammar options:\n" + " | ".join([ k + ": " + str(v)  for k, v in sorted(dServerGCOptions.items()) ]))
    oDict = gce.getDictionary()
    oTokenizer = tkz.Tokenizer("fr")
    oTF = tf.TextFormatter()
    dUser = {}
    userGenerator = genUserId()

    app = Bottle()
def genUserId ():
    i = 0
    while True:
        yield str(i)
        i += 1


def parseParagraph (iParagraph, sText, oTokenizer, oSpellChecker, dOptions, bDebug=False, bEmptyIfNoErrors=False):
    aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions)
    aGrammErrs = list(aGrammErrs)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
            aSpellErrs.append(dToken)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return "  " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    

if __name__ == '__main__':

    gce.load("Server")
    echo("Grammalecte v{}".format(gce.version))
    dServerOptions = getServerOptions()
    dGCOptions = getConfigOptions("fr")
    if dGCOptions:
        gce.setOptions(dGCOptions)
    dServerGCOptions = gce.getOptions()
    echo("Grammar options:\n" + " | ".join([ k + ": " + str(v)  for k, v in sorted(dServerGCOptions.items()) ]))
    oSpellChecker = gce.getSpellChecker()
    oTokenizer = tkz.Tokenizer("fr")
    oTF = tf.TextFormatter()
    dUser = {}
    userGenerator = genUserId()

    app = Bottle()

                dOptions.update(json.loads(request.forms.options))
            except:
                sError = "request options not used"
        sJSON = '{ "program": "grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n'
        for i, sText in enumerate(txt.getParagraph(request.forms.text), 1):
            if bTF:
                sText = oTF.formatText(sText)
            sText = parseParagraph(i, sText, oTokenizer, oDict, dOptions, bEmptyIfNoErrors=True)
            if sText:
                if bComma:
                    sJSON += ",\n"
                sJSON += sText
                bComma = True
        sJSON += "\n]}\n"
        return sJSON
                dOptions.update(json.loads(request.forms.options))
            except:
                sError = "request options not used"
        sJSON = '{ "program": "grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n'
        for i, sText in enumerate(txt.getParagraph(request.forms.text), 1):
            if bTF:
                sText = oTF.formatText(sText)
            sText = parseParagraph(i, sText, oTokenizer, oSpellChecker, dOptions, bEmptyIfNoErrors=True)
            if sText:
                if bComma:
                    sJSON += ",\n"
                sJSON += sText
                bComma = True
        sJSON += "\n]}\n"
        return sJSON
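
Note on the server response built above: each paragraph that keeps errors contributes one object with the fields iParagraph, lGrammarErrors and lSpellingErrors, wrapped in an envelope carrying program, version, lang, error and data. A minimal client-side sketch that reads such a reply; the literal values are invented, only the field names come from the code shown (spelling-error tokens carry sType and sValue, as produced by the tokenizer):

import json

# sJSON stands in for the string returned by the route above
sJSON = """{ "program": "grammalecte-fr", "version": "x.y", "lang": "fr", "error": "", "data" : [
  { "iParagraph": 1, "lGrammarErrors": [], "lSpellingErrors": [ { "sType": "WORD", "sValue": "bonjourr" } ] }
]}"""

dReply = json.loads(sJSON)
for dParagraph in dReply["data"]:
    for dToken in dParagraph["lSpellingErrors"]:
        print("§ {}: unknown word: {}".format(dParagraph["iParagraph"], dToken["sValue"]))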

Modified graphspell-js/ibdawg.js from [cea88b04de] to [6c0fce95e9].

    // INDEXABLE BINARY DIRECT ACYCLIC WORD GRAPH

    constructor (param1, sPath="") {
        // param1 can be a filename or a object with all the necessary data.
        try {
            let oData = null;
            if (typeof(param1) == "string") {
                let sDicName = param1;
                let sURL = (sPath !== "") ? sPath + "/" + sDicName : "resource://grammalecte/graphspell/_dictionaries/"+sDicName;
                oData = JSON.parse(helpers.loadFile(sURL));
            } else {
                oData = param1;
            }
            Object.assign(this, oData);
        }
        catch (e) {
    // INDEXABLE BINARY DIRECT ACYCLIC WORD GRAPH

    constructor (param1, sPath="") {
        // param1 can be a filename or a object with all the necessary data.
        try {
            let oData = null;
            if (typeof(param1) == "string") {

                let sURL = (sPath !== "") ? sPath + "/" + param1 : "resource://grammalecte/graphspell/_dictionaries/"+param1;
                oData = JSON.parse(helpers.loadFile(sURL));
            } else {
                oData = param1;
            }
            Object.assign(this, oData);
        }
        catch (e) {
        return Boolean(this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask);
    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let l = this.morph(sWord);
        if (sWord[0].gl_isUpperCase()) {
            l = l.concat(this.morph(sWord.toLowerCase()));
            if (sWord.gl_isUpperCase() && sWord.length > 1) {
                l = l.concat(this.morph(sWord.gl_toCapitalize()));
            }
        }
        return l;
    }

    suggest (sWord, nSuggLimit=10) {
        // returns a array of suggestions for <sWord>







|

|







278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
        return Boolean(this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask);
    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let l = this.morph(sWord);
        if (sWord[0].gl_isUpperCase()) {
            l.push(...this.morph(sWord.toLowerCase()));
            if (sWord.gl_isUpperCase() && sWord.length > 1) {
                l.push(...this.morph(sWord.gl_toCapitalize()));
            }
        }
        return l;
    }

    suggest (sWord, nSuggLimit=10) {
        // returns a array of suggestions for <sWord>
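
Note on the getMorph change above: push(...) now appends the extra lookups in place instead of rebuilding the array with concat, but the behaviour is unchanged: a word starting with a capital is also looked up in lowercase, and an all-caps word additionally in capitalized form. A rough Python sketch of that casing fallback, with a stand-in morph() table (getMorphWithCasing and the placeholder tag are illustrative, not Grammalecte code):

def getMorphWithCasing (morph, sWord):
    "mirror of the casing logic in IBDAWG.getMorph: capitalized words are also looked up in lowercase / capitalized form"
    lMorph = list(morph(sWord))
    if sWord[:1].isupper():
        lMorph.extend(morph(sWord.lower()))           # "Chat" -> also try "chat"
        if sWord.isupper() and len(sWord) > 1:
            lMorph.extend(morph(sWord.capitalize()))  # "CHAT" -> also try "Chat"
    return lMorph

# stand-in lookup table with a placeholder tag; not the real dictionary
dFake = { "chat": ["chat (placeholder tag: common noun, masc., sing.)"] }
print(getMorphWithCasing(lambda s: dFake.get(s, []), "CHAT"))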

Modified graphspell-js/spellchecker.js from [ee0b03faf8] to [61ed7ab1e9].


const dDefaultDictionaries = new Map([
    ["fr", "fr.json"],
    ["en", "en.json"]
]);


class Spellchecker {

    constructor (sLangCode, mainDic=null, extentedDic=null, personalDic=null, sPath="") {
        // returns true if the main dictionary is loaded
        this.sLangCode = sLangCode;
        console.log(sLangCode);
        console.log(mainDic);
        if (mainDic === null) {
            mainDic = dDefaultDictionaries.gl_get(sLangCode, "");
        }
        this.oMainDic = this._loadDictionary(mainDic, sPath, true);
        this.oExtendedDic = this._loadDictionary(extentedDic, sPath);
        this.oPersonalDic = this._loadDictionary(personalDic, sPath);
    }

    _loadDictionary (dictionary, sPath, bNecessary=false) {
        // returns an IBDAWG object
        if (dictionary === null) {
            return null;
        }
        try {
        	if (typeof(require) !== 'undefined') {
        		console.log(">>>> <resource:>");
                return new ibdawg.IBDAWG(dictionary);  // dictionary can be a filename or a JSON object
            } else {
            	console.log(">>>> no <resource:>");
                return new IBDAWG(dictionary, sPath);  // dictionary can be a filename or a JSON object
            }
        }
        catch (e) {

        	if (bNecessary) {
        		throw e.message;
        	}
            console.log(e.message);
            return null;
        }
    }

    setMainDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oMainDic = this._loadDictionary(dictionary);
        return bool(this.oMainDic);
    }

    setExtendedDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oExtendedDic = this._loadDictionary(dictionary);
        return bool(this.oExtendedDic);
    }

    setPersonalDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oPersonalDic = this._loadDictionary(dictionary);
        return bool(this.oPersonalDic);
    }

    // IBDAWG functions

    isValidToken (sToken) {
        // checks if sToken is valid (if there is hyphens in sToken, sToken is split, each part is checked)
        if (this.oMainDic.isValidToken(sToken)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.isValidToken(sToken)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.isValidToken(sToken)) {
            return true;
        }
        return false;
    }

    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (this.oMainDic.isValid(sToken)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.isValid(sToken)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.isValid(sToken)) {
            return true;
        }
        return false;
    }

    lookup (sWord) {
        // checks if sWord is in dictionary as is (strict verification)
        if (this.oMainDic.lookup(sToken)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.lookup(sToken)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.lookup(sToken)) {
            return true;
        }
        return false;
    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let lResult = this.oMainDic.getMorph(sToken);
        if (this.oExtendedDic) {
            lResult.extends(this.oExtendedDic.getMorph(sToken));
        }
        if (this.oPersonalDic) {
            lResult.extends(this.oPersonalDic.getMorph(sToken));
        }
        return lResult;
    }

    * suggest (sWord, nSuggLimit=10) {
        // generator: returns 1,2 or 3 lists of suggestions
        yield this.oMainDic.suggest(sWord, nSuggLimit);
        if (this.oExtendedDic) {
            yield this.oExtendedDic.suggest(sWord, nSuggLimit);
        }
        if (this.oPersonalDic) {
            yield this.oPersonalDic.suggest(sWord, nSuggLimit);
        }

const dDefaultDictionaries = new Map([
    ["fr", "fr.json"],
    ["en", "en.json"]
]);


class SpellChecker {

    constructor (sLangCode, sPath="", mainDic=null, extentedDic=null, personalDic=null) {
        // returns true if the main dictionary is loaded
        this.sLangCode = sLangCode;


        if (mainDic === null) {
            mainDic = dDefaultDictionaries.gl_get(sLangCode, "");
        }
        this.oMainDic = this._loadDictionary(mainDic, sPath, true);
        this.oExtendedDic = this._loadDictionary(extentedDic, sPath);
        this.oPersonalDic = this._loadDictionary(personalDic, sPath);
    }

    _loadDictionary (dictionary, sPath, bNecessary=false) {
        // returns an IBDAWG object
        if (dictionary === null) {
            return null;
        }
        try {
            if (typeof(require) !== 'undefined') {

                return new ibdawg.IBDAWG(dictionary);  // dictionary can be a filename or a JSON object
            } else {

                return new IBDAWG(dictionary, sPath);  // dictionary can be a filename or a JSON object
            }
        }
        catch (e) {
            console.log(e.message);
            if (bNecessary) {
                throw e.message;
            }

            return null;
        }
    }

    setMainDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oMainDic = this._loadDictionary(dictionary);
        return Boolean(this.oMainDic);
    }

    setExtendedDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oExtendedDic = this._loadDictionary(dictionary);
        return Boolean(this.oExtendedDic);
    }

    setPersonalDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oPersonalDic = this._loadDictionary(dictionary);
        return Boolean(this.oPersonalDic);
    }

    // IBDAWG functions

    isValidToken (sToken) {
        // checks if sToken is valid (if there is hyphens in sToken, sToken is split, each part is checked)
        if (this.oMainDic.isValidToken(sToken)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.isValidToken(sToken)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.isValidToken(sToken)) {
            return true;
        }
        return false;
    }

    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (this.oMainDic.isValid(sWord)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.isValid(sWord)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.isValid(sWord)) {
            return true;
        }
        return false;
    }

    lookup (sWord) {
        // checks if sWord is in dictionary as is (strict verification)
        if (this.oMainDic.lookup(sWord)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.lookup(sWord)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.lookup(sWord)) {
            return true;
        }
        return false;
    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let lResult = this.oMainDic.getMorph(sWord);
        if (this.oExtendedDic) {
            lResult.push(...this.oExtendedDic.getMorph(sWord));
        }
        if (this.oPersonalDic) {
            lResult.push(...this.oPersonalDic.getMorph(sWord));
        }
        return lResult;
    }

    * suggest (sWord, nSuggLimit=10) {
        // generator: returns 1, 2 or 3 lists of suggestions
        yield this.oMainDic.suggest(sWord, nSuggLimit);
        if (this.oExtendedDic) {
            yield this.oExtendedDic.suggest(sWord, nSuggLimit);
        }
        if (this.oPersonalDic) {
            yield this.oPersonalDic.suggest(sWord, nSuggLimit);
        }
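
Note on the loading policy in the new SpellChecker above: _loadDictionary only propagates the error when the dictionary is marked as necessary (the main one); a missing extended or personal dictionary is logged and skipped. A small Python sketch of the same policy, assuming nothing beyond what the JS code shows (loadDictionary and loader here are stand-ins, not the graphspell API):

def loadDictionary (sfDictionary, loader, bNecessary=False):
    "mirror of SpellChecker._loadDictionary: a required dictionary re-raises, optional ones fail soft"
    if not sfDictionary:
        return None
    try:
        return loader(sfDictionary)          # stands for ibdawg.IBDAWG(sfDictionary)
    except Exception as e:
        if bNecessary:
            raise                            # no main dictionary -> the spell checker is unusable
        print(e)
        return None                          # extended/personal dictionary missing -> just skip it

def loader (sfDictionary):
    # stand-in for the IBDAWG constructor: only the main dictionary "exists" here
    if sfDictionary != "fr.bdic":
        raise OSError(sfDictionary + " not found")
    return object()

oMainDic = loadDictionary("fr.bdic", loader, bNecessary=True)
oPersonalDic = loadDictionary("perso.bdic", loader)    # prints the error and returns None
print(oMainDic is not None, oPersonalDic)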

Modified graphspell-js/tokenizer.js from [d6429837c4] to [c3f0ee8c90].

                }
            }
            i += nCut;
            sText = sText.slice(nCut);
        }
    }

    getSpellingErrors (sText, oDict) {
        let aSpellErr = [];
        for (let oToken of this.genTokens(sText)) {
            if (oToken.sType === 'WORD' && !oDict.isValidToken(oToken.sValue)) {
                aSpellErr.push(oToken);
            }
        }
        return aSpellErr;
    }
}


if (typeof(exports) !== 'undefined') {
    exports.Tokenizer = Tokenizer;
}
                }
            }
            i += nCut;
            sText = sText.slice(nCut);
        }
    }

    getSpellingErrors (sText, oSpellChecker) {
        let aSpellErr = [];
        for (let oToken of this.genTokens(sText)) {
            if (oToken.sType === 'WORD' && !oSpellChecker.isValidToken(oToken.sValue)) {
                aSpellErr.push(oToken);
            }
        }
        return aSpellErr;
    }
}


if (typeof(exports) !== 'undefined') {
    exports.Tokenizer = Tokenizer;
}
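
Note on the tokenizer change above: getSpellingErrors now receives the SpellChecker but keeps the same contract, i.e. every WORD token that no loaded dictionary validates is reported. The same loop, isolated as a Python sketch with stand-in tokens and word list (not the real Tokenizer/SpellChecker classes):

def getSpellingErrors (genTokens, isValidToken):
    "same contract as Tokenizer.getSpellingErrors: keep every WORD token rejected by the spell checker"
    return [ dToken  for dToken in genTokens
             if dToken["sType"] == "WORD" and not isValidToken(dToken["sValue"]) ]

# stand-ins: a pre-tokenized sentence and a tiny word set
lTokens = [ { "sType": "WORD", "sValue": "Bonjour" }, { "sType": "WORD", "sValue": "celaa" } ]
aKnown = { "bonjour", "cela" }
print(getSpellingErrors(iter(lTokens), lambda s: s.lower() in aKnown))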

Modified graphspell/ibdawg.py from [6527ad55bf] to [3bf18d8144].

        self.nNode = int(l.pop(0))
        self.nArc = int(l.pop(0))
        self.nAff = int(l.pop(0))
        self.cStemming = l.pop(0)
        self.nTag = self.nArcVal - self.nChar - self.nAff
        # <dChar> to get the value of an arc, <dCharVal> to get the char of an arc with its value
        self.dChar = {}
        for i in range(1, self.nChar):
            self.dChar[self.lArcVal[i]] = i
        self.dCharVal = { v: k  for k, v in self.dChar.items() }
        self.nBytesOffset = 1 # version 3

    def _initJSON (self):
        "initialize with a JSON text file"
        self.__dict__.update(json.loads(self.by.decode("utf-8")))
        self.nNode = int(l.pop(0))
        self.nArc = int(l.pop(0))
        self.nAff = int(l.pop(0))
        self.cStemming = l.pop(0)
        self.nTag = self.nArcVal - self.nChar - self.nAff
        # <dChar> to get the value of an arc, <dCharVal> to get the char of an arc with its value
        self.dChar = {}
        for i in range(1, self.nChar+1):
            self.dChar[self.lArcVal[i]] = i
        self.dCharVal = { v: k  for k, v in self.dChar.items() }
        self.nBytesOffset = 1 # version 3

    def _initJSON (self):
        "initialize with a JSON text file"
        self.__dict__.update(json.loads(self.by.decode("utf-8")))
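
Note on the one-character fix above: replacing range(1, self.nChar) with range(1, self.nChar+1) suggests the arc characters sit at lArcVal[1] through lArcVal[nChar], so the old loop silently dropped the last character from dChar. A toy illustration of the difference (the values are placeholders):

lArcVal = ["", "a", "b", "c"]    # placeholder values: index 0 is not a character, chars sit at 1..nChar
nChar = 3
print({ lArcVal[i]: i  for i in range(1, nChar) })      # old loop: {'a': 1, 'b': 2} -> 'c' is lost
print({ lArcVal[i]: i  for i in range(1, nChar + 1) })  # fixed loop: {'a': 1, 'b': 2, 'c': 3}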

Modified graphspell/spellchecker.py from [19c7dd4df2] to [c7a5786ac5].


dDefaultDictionaries = {
    "fr": "fr.bdic",
    "en": "en.bdic"
}


class Spellchecker ():

    def __init__ (self, sLangCode, sfMainDic="", sfExtendedDic="", sfPersonalDic=""):
        "returns True if the main dictionary is loaded"
        self.sLangCode = sLangCode
        if not sfMainDic:
            sfMainDic = dDefaultDictionaries.get(sLangCode, "")
        self.oMainDic = self._loadDictionary(sfMainDic)
        self.oExtendedDic = self._loadDictionary(sfExtendedDic)
        self.oPersonalDic = self._loadDictionary(sfPersonalDic)
        return bool(self.oMainDic)

    def _loadDictionary (self, sfDictionary):
        "returns an IBDAWG object"
        if not sfDictionary:
            return None
        try:
            return ibdawg.IBDAWG(sfDictionary)

dDefaultDictionaries = {
    "fr": "fr.bdic",
    "en": "en.bdic"
}


class SpellChecker ():

    def __init__ (self, sLangCode, sfMainDic="", sfExtendedDic="", sfPersonalDic=""):
        "returns True if the main dictionary is loaded"
        self.sLangCode = sLangCode
        if not sfMainDic:
            sfMainDic = dDefaultDictionaries.get(sLangCode, "")
        self.oMainDic = self._loadDictionary(sfMainDic)
        self.oExtendedDic = self._loadDictionary(sfExtendedDic)
        self.oPersonalDic = self._loadDictionary(sfPersonalDic)


    def _loadDictionary (self, sfDictionary):
        "returns an IBDAWG object"
        if not sfDictionary:
            return None
        try:
            return ibdawg.IBDAWG(sfDictionary)

            return True
        if self.oPersonalDic and self.oPersonalDic.isValidToken(sToken):
            return True
        return False

    def isValid (self, sWord):
        "checks if sWord is valid (different casing tested if the first letter is a capital)"
        if self.oMainDic.isValid(sToken):
            return True
        if self.oExtendedDic and self.oExtendedDic.isValid(sToken):
            return True
        if self.oPersonalDic and self.oPersonalDic.isValid(sToken):
            return True
        return False

    def lookup (self, sWord):
        "checks if sWord is in dictionary as is (strict verification)"
        if self.oMainDic.lookup(sToken):
            return True
        if self.oExtendedDic and self.oExtendedDic.lookup(sToken):
            return True
        if self.oPersonalDic and self.oPersonalDic.lookup(sToken):
            return True
        return False

    def getMorph (self, sWord):
        "retrieves morphologies list, different casing allowed"
        lResult = self.oMainDic.getMorph(sToken)
        if self.oExtendedDic:
            lResult.extends(self.oExtendedDic.getMorph(sToken))
        if self.oPersonalDic:
            lResult.extends(self.oPersonalDic.getMorph(sToken))
        return lResult

    def suggest (self, sWord, nSuggLimit=10):
        "generator: returns 1,2 or 3 lists of suggestions"
        yield self.oMainDic.suggest(sWord, nSuggLimit)
        if self.oExtendedDic:
            yield self.oExtendedDic.suggest(sWord, nSuggLimit)
        if self.oPersonalDic:
            yield self.oPersonalDic.suggest(sWord, nSuggLimit)

    def select (self, sPattern=""):
        "generator: returns all entries which morphology fits <sPattern>"
        yield from self.oMainDic.select(sPattern)
        if self.oExtendedDic:
            yield from self.oExtendedDic.select(sPattern)
        if self.oPersonalDic:
            yield from self.oPersonalDic.select(sPattern)
            return True
        if self.oPersonalDic and self.oPersonalDic.isValidToken(sToken):
            return True
        return False

    def isValid (self, sWord):
        "checks if sWord is valid (different casing tested if the first letter is a capital)"
        if self.oMainDic.isValid(sWord):
            return True
        if self.oExtendedDic and self.oExtendedDic.isValid(sWord):
            return True
        if self.oPersonalDic and self.oPersonalDic.isValid(sWord):
            return True
        return False

    def lookup (self, sWord):
        "checks if sWord is in dictionary as is (strict verification)"
        if self.oMainDic.lookup(sWord):
            return True
        if self.oExtendedDic and self.oExtendedDic.lookup(sWord):
            return True
        if self.oPersonalDic and self.oPersonalDic.lookup(sWord):
            return True
        return False

    def getMorph (self, sWord):
        "retrieves morphologies list, different casing allowed"
        lResult = self.oMainDic.getMorph(sWord)
        if self.oExtendedDic:
            lResult.extends(self.oExtendedDic.getMorph(sWord))
        if self.oPersonalDic:
            lResult.extends(self.oPersonalDic.getMorph(sWord))
        return lResult

    def suggest (self, sWord, nSuggLimit=10):
        "generator: returns 1, 2 or 3 lists of suggestions"
        yield self.oMainDic.suggest(sWord, nSuggLimit)
        if self.oExtendedDic:
            yield self.oExtendedDic.suggest(sWord, nSuggLimit)
        if self.oPersonalDic:
            yield self.oPersonalDic.suggest(sWord, nSuggLimit)

    def select (self, sPattern=""):
        "generator: returns all entries which morphology fits <sPattern>"
        yield from self.oMainDic.select(sPattern)
        if self.oExtendedDic:
            yield from self.oExtendedDic.select(sPattern)
        if self.oPersonalDic:
            yield from self.oPersonalDic.select(sPattern)

    def drawPath (self, sWord):
        self.oMainDic.drawPath(sWord)
        if self.oExtendedDic:
            print("-----")
            self.oExtendedDic.drawPath(sWord)
        if self.oPersonalDic:
            print("-----")
            self.oPersonalDic.drawPath(sWord)
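
Taken together, the new graphspell SpellChecker wraps up to three IBDAWG dictionaries (main, extended, personal): isValid, isValidToken and lookup fall through them in that order, while getMorph, suggest and select merge the results of all loaded dictionaries. A hedged usage sketch based only on the constructor and methods shown above; whether the default fr.bdic is actually present depends on the installation, and my_words.bdic is just an example file name:

# assuming the graphspell package from this repository is on the import path
from graphspell.spellchecker import SpellChecker

oSpellChecker = SpellChecker("fr")         # empty path -> falls back to dDefaultDictionaries["fr"]
# an editable personal dictionary can be added at construction time:
# oSpellChecker = SpellChecker("fr", sfPersonalDic="my_words.bdic")

print(oSpellChecker.isValid("bonjour"))    # True as soon as one loaded dictionary accepts the word
for lSugg in oSpellChecker.suggest("bonjourr"):    # one list per loaded dictionary
    print(" | ".join(lSugg))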