Grammalecte: Changes On Branch ae469535bca09f05

Changes In Branch multid Through [ae469535bc] Excluding Merge-Ins

This is equivalent to a diff from 62c389aedf to ae469535bc

2018-02-18
10:13
[tb] merge trunk: fix bugs check-in: bce8dea5b0 user: olr tags: tb, multid
2018-02-17
11:50
[build][graphspell][core] build with options to integrate extended and personal dictionaries check-in: e0d79cdbf4 user: olr tags: trunk, core, build, graphspell
11:31
[graphspell][js][bug] spellchecker: dictionary not always a filename string check-in: ae469535bc user: olr tags: graphspell, multid
10:13
[graphspell] spellchecker: handling exception when loading dictionary file check-in: c075441c64 user: olr tags: graphspell, multid
2018-02-16
15:46
[fx][graphspell] merge multid: multi-dictionaries > editable personal dictionary check-in: 607d532bca user: olr tags: trunk, new_feature, fx, graphspell
2018-02-13
13:39
[core][js] multi-dictionaries check-in: d8959f76a4 user: olr tags: core, multid
13:38
[graphspell][js] rename vars check-in: 62c389aedf user: olr tags: trunk, graphspell
10:47
[graphspell][js] spellchecker wrapper for ibdawg check-in: c989c20101 user: olr tags: trunk, graphspell
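
In short: the branch replaces direct use of a single IBDAWG dictionary (_oDict) with a SpellChecker wrapper (_oSpellChecker) that can stack a main, an extended and a personal dictionary. A minimal usage sketch of the new JavaScript wrapper, not part of any check-in below; the extended/personal file names are illustrative, derived from gc_lang/fr/config.ini:

    // minimal sketch (illustrative file names)
    let sPath = "grammalecte/graphspell/_dictionaries";
    let oSpellChecker = new SpellChecker("fr", sPath, "fr.json", "fr.extended.json", "fr.personal.json");
    console.log(oSpellChecker.isValidToken("exemple"));   // true if the main, extended or personal dictionary knows the token
    console.log(oSpellChecker.getMorph("exemple"));       // morphologies merged from every loaded dictionary
    for (let lSugg of oSpellChecker.suggest("exemple")) { // generator: one list of suggestions per dictionary
        console.log(lSugg.join(" | "));
    }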

Modified compile_rules.py from [7fa2e820bf] to [9bd1433006].

[hunk: old lines 52-66 / new lines 52-66]

    s = re.sub(r"textarea0\(\s*", 'look(sx, ', s)                                           # textarea0(s)
    s = re.sub(r"before0_chk1\(\s*", 'look_chk1(dDA, sx[:m.start()], 0, ', s)               # before0_chk1(s)
    s = re.sub(r"after0_chk1\(\s*", 'look_chk1(dDA, sx[m.end():], m.end(), ', s)            # after0_chk1(s)
    s = re.sub(r"textarea0_chk1\(\s*", 'look_chk1(dDA, sx, 0, ', s)                         # textarea0_chk1(s)
    s = re.sub(r"isEndOfNG\(\s*\)", 'isEndOfNG(dDA, s[m.end():], m.end())', s)              # isEndOfNG(s)
    s = re.sub(r"isNextNotCOD\(\s*\)", 'isNextNotCOD(dDA, s[m.end():], m.end())', s)        # isNextNotCOD(s)
    s = re.sub(r"isNextVerb\(\s*\)", 'isNextVerb(dDA, s[m.end():], m.end())', s)            # isNextVerb(s)
    s = re.sub(r"\bspell *[(]", '_oDict.isValid(', s)
    s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
    s = re.sub(r"[\\](\d+)", 'm.group(\\1)', s)
    return s


def uppercase (s, sLang):
    "(flag i is not enough): converts regex to uppercase regex: 'foo' becomes '[Ff][Oo][Oo]', but 'Bar' becomes 'B[Aa][Rr]'."
    sUp = ""

Modified gc_core/js/lang_core/gc_engine.js from [2f5e964b5d] to [e7bf2c39e9].

[hunk: old lines 34-48 / new lines 34-48]

}


// data
let _sAppContext = "";                                  // what software is running
let _dOptions = null;
let _aIgnoredRules = new Set();
let _oDict = null;
let _oSpellChecker = null;
let _dAnalyses = new Map();                             // cache for data from dictionary


var gc_engine = {

    //// Informations

[hunk: old lines 318-346 / new lines 318-346]

    },

    //// Initialization

    load: function (sContext="JavaScript", sPath="") {
        try {
            if (typeof(require) !== 'undefined') {
                var ibdawg = require("resource://grammalecte/graphspell/ibdawg.js");
                _oDict = new ibdawg.IBDAWG("${dic_filename}.json");
                var spellchecker = require("resource://grammalecte/graphspell/spellchecker.js");
                _oSpellChecker = new spellchecker.SpellChecker("${lang}", "", "${dic_filename}.json");
            } else {
                _oDict = new IBDAWG("${dic_filename}.json", sPath);
                _oSpellChecker = new SpellChecker("${lang}", sPath, "${dic_main_filename_js}", "${dic_extended_filename_js}", "${dic_personal_filename_js}");
            }
            _sAppContext = sContext;
            _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
        }
        catch (e) {
            helpers.logerror(e);
        }
    },

    getDictionary: function () {
        return _oDict;
    getSpellChecker: function () {
        return _oSpellChecker;
    },

    //// Options

    setOption: function (sOpt, bVal) {
        if (_dOptions.has(sOpt)) {
            _dOptions.set(sOpt, bVal);

[hunk: old lines 386-402 / new lines 386-402]

        helpers.echo("DA: " + dDA.get(aWord[0]));
    }
    helpers.echo("FSA: " + _dAnalyses.get(aWord[1]));
    return true;
}

function _storeMorphFromFSA (sWord) {
    // retrieves morphologies list from _oDict -> _dAnalyses
    //helpers.echo("register: "+sWord + " " + _oDict.getMorph(sWord).toString())
    _dAnalyses.set(sWord, _oDict.getMorph(sWord));
    // retrieves morphologies list from _oSpellChecker -> _dAnalyses
    //helpers.echo("register: "+sWord + " " + _oSpellChecker.getMorph(sWord).toString())
    _dAnalyses.set(sWord, _oSpellChecker.getMorph(sWord));
    return !!_dAnalyses.get(sWord);
}

function morph (dDA, aWord, sPattern, bStrict=true, bNoWord=false) {
    // analyse a tuple (position, word), return true if sPattern in morphologies (disambiguation on)
    if (!aWord) {
        //helpers.echo("morph: noword, returns " + bNoWord);

[hunk: old lines 640-653 / new lines 640-653]

    exports._rewrite = gc_engine._rewrite;
    exports.ignoreRule = gc_engine.ignoreRule;
    exports.resetIgnoreRules = gc_engine.resetIgnoreRules;
    exports.reactivateRule = gc_engine.reactivateRule;
    exports.listRules = gc_engine.listRules;
    exports._getRules = gc_engine._getRules;
    exports.load = gc_engine.load;
    exports.getDictionary = gc_engine.getDictionary;
    exports.getSpellChecker = gc_engine.getSpellChecker;
    exports.setOption = gc_engine.setOption;
    exports.setOptions = gc_engine.setOptions;
    exports.getOptions = gc_engine.getOptions;
    exports.getDefaultOptions = gc_engine.getDefaultOptions;
    exports.resetOptions = gc_engine.resetOptions;
}
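
For consumers, the visible change in gc_engine is that getDictionary() becomes getSpellChecker(). A minimal consumer-side sketch, modelled on gce_worker.js further down (sExtensionPath and sParagraph stand for values supplied by the caller), not part of the check-in:

    gc_engine.load("Firefox", sExtensionPath + "grammalecte/graphspell/_dictionaries");
    let oSpellChecker = gc_engine.getSpellChecker();      // replaces gc_engine.getDictionary()
    let oTokenizer = new Tokenizer("fr");
    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);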

Modified gc_core/py/lang_core/gc_engine.py from [9fc11201d4] to [b15edd00ca].

[hunk: old lines 1-44 / new lines 1-44]

# Grammalecte
# Grammar checker engine

import re
import sys
import os
import traceback
#import unicodedata
from itertools import chain

from ..graphspell.ibdawg import IBDAWG
from ..graphspell.spellchecker import SpellChecker
from ..graphspell.echo import echo
from . import gc_options


__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
            "load", "parse", "getDictionary", \
            "load", "parse", "getSpellChecker", \
            "setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
            "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules" ]

__version__ = "${version}"


lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"

_rules = None                               # module gc_rules

# data
_sAppContext = ""                           # what software is running
_dOptions = None
_aIgnoredRules = set()
_oDict = None
_oSpellChecker = None
_dAnalyses = {}                             # cache for data from dictionary



#### Parsing

def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False):

[hunk: old lines 284-302 / new lines 284-302]

    #import lightproof_handler_${implname} as opt
    _createError = _createWriterError
except ImportError:
    _createError = _createDictError


def load (sContext="Python"):
    global _oDict
    global _oSpellChecker
    global _sAppContext
    global _dOptions
    try:
        _oDict = IBDAWG("${dic_filename}.bdic")
        _oSpellChecker = SpellChecker("${lang}", "${dic_main_filename_py}", "${dic_extended_filename_py}", "${dic_personal_filename_py}")
        _sAppContext = sContext
        _dOptions = dict(gc_options.getOptions(sContext))   # duplication necessary, to be able to reset to default
    except:
        traceback.print_exc()


def setOption (sOpt, bVal):

[hunk: old lines 329-344 / new lines 329-344]



def resetOptions ():
    global _dOptions
    _dOptions = dict(gc_options.getOptions(_sAppContext))


def getDictionary ():
    return _oDict
def getSpellChecker ():
    return _oSpellChecker


def _getRules (bParagraph):
    try:
        if not bParagraph:
            return _rules.lSentenceRules
        return _rules.lParagraphRules

[hunk: old lines 394-410 / new lines 394-410]

    if tWord[0] in dDA:
        echo("DA: " + str(dDA[tWord[0]]))
    echo("FSA: " + str(_dAnalyses[tWord[1]]))
    return True


def _storeMorphFromFSA (sWord):
    "retrieves morphologies list from _oDict -> _dAnalyses"
    "retrieves morphologies list from _oSpellChecker -> _dAnalyses"
    global _dAnalyses
    _dAnalyses[sWord] = _oDict.getMorph(sWord)
    _dAnalyses[sWord] = _oSpellChecker.getMorph(sWord)
    return True  if _dAnalyses[sWord]  else False


def morph (dDA, tWord, sPattern, bStrict=True, bNoWord=False):
    "analyse a tuple (position, word), return True if sPattern in morphologies (disambiguation on)"
    if not tWord:
        return bNoWord

Modified gc_core/py/oxt/Grammalecte.py from [0f730fb3f5] to [1e4211990b].

[hunk: old lines 39-52 / new lines 39-54]

        dOpt = Options.load(xCurCtx)
        gce.setOptions(dOpt)
        # store for results of big paragraphs
        self.dResult = {}
        self.nMaxRes = 1500
        self.lLastRes = deque(maxlen=self.nMaxRes)
        self.nRes = 0
        #oSpellChecker = gce.getSpellChecker();
        #oSpellChecker.setPersonalDictionary("fr.personal.json")

    # XServiceName method implementations
    def getServiceName (self):
        return self.ImplementationName

    # XServiceInfo method implementations
    def getImplementationName (self):

[hunk: old lines 128-143 / new lines 130-145]

        gce.resetIgnoreRules()

    # XServiceDisplayName
    def getServiceDisplayName (self, aLocale):
        return gce.name

    # Grammalecte
    def getDictionary (self):
        return gce.getDictionary()
    def getSpellChecker (self):
        return gce.getSpellChecker()


g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation(Grammalecte, "org.openoffice.comp.pyuno.Lightproof."+gce.pkg, ("com.sun.star.linguistic2.Proofreader",),)

# g_ImplementationHelper.addImplementation( opt_handler.LightproofOptionsEventHandler, \
#     "org.openoffice.comp.pyuno.LightproofOptionsEventHandler." + gce.pkg, ("com.sun.star.awt.XContainerWindowEventHandler",),)

Modified gc_lang/fr/config.ini from [a48aa6be74] to [4ba46a1e3e].

[hunk: old lines 10-36 / new lines 10-42]

author = Olivier R.
provider = Dicollecte
link = http://grammalecte.net
description = Correcteur grammatical pour le français.
extras = README_fr.txt
logo = logo.png

# lexicon source
# main dictionary
lexicon_src = lexicons/French.lex
# binary dictionary file name
dic_filename = fr
# binary dictionary name
dic_name = French
# extended dictionary
lexicon_extended_src = lexicons/French.extended.lex
dic_extended_filename = fr.extended
dic_extended_name = Français - dictionnaire étendu
# personal dictionary
lexicon_personal_src = lexicons/French.personal.lex
dic_personal_filename = fr.personal
dic_personal_name = Français - dictionnaire personnel
# Finite state automaton compression: 1, 2 (experimental) or 3 (experimental)
fsa_method = 1
# stemming method: S for suffixes only, A for prefixes and suffixes
stemming_method = S

# LibreOffice
unopkg = C:/Program Files/LibreOffice 5/program/unopkg.com
unopkg = C:/Program Files/LibreOffice/program/unopkg.com
oxt_version = 6.2
oxt_identifier = French.linguistic.resources.from.Dicollecte.by.OlivierR

# Firefox
fx_identifier = French-GC@grammalecte.net
fx_name = Grammalecte [fr]

Modified gc_lang/fr/modules-js/gce_suggestions.js from [d955978cbc] to [0c31bc1a27].

[hunk: old lines 206-257 / new lines 206-257]

        } else if (sGender == ":f") {
            return suggFemPlur(sFlex);
        }
    }
    let aSugg = new Set();
    if (!sFlex.includes("-")) {
        if (sFlex.endsWith("l")) {
            if (sFlex.endsWith("al") && sFlex.length > 2 && _oDict.isValid(sFlex.slice(0,-1)+"ux")) {
            if (sFlex.endsWith("al") && sFlex.length > 2 && _oSpellChecker.isValid(sFlex.slice(0,-1)+"ux")) {
                aSugg.add(sFlex.slice(0,-1)+"ux");
            }
            if (sFlex.endsWith("ail") && sFlex.length > 3 && _oDict.isValid(sFlex.slice(0,-2)+"ux")) {
            if (sFlex.endsWith("ail") && sFlex.length > 3 && _oSpellChecker.isValid(sFlex.slice(0,-2)+"ux")) {
                aSugg.add(sFlex.slice(0,-2)+"ux");
            }
        }
        if (_oDict.isValid(sFlex+"s")) {
        if (_oSpellChecker.isValid(sFlex+"s")) {
            aSugg.add(sFlex+"s");
        }
        if (_oDict.isValid(sFlex+"x")) {
        if (_oSpellChecker.isValid(sFlex+"x")) {
            aSugg.add(sFlex+"x");
        }
    }
    if (mfsp.hasMiscPlural(sFlex)) {
        mfsp.getMiscPlural(sFlex).forEach(function(x) { aSugg.add(x); });
    }
    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

function suggSing (sFlex) {
    // returns singular forms assuming sFlex is plural
    if (sFlex.includes("-")) {
        return "";
    }
    let aSugg = new Set();
    if (sFlex.endsWith("ux")) {
        if (_oDict.isValid(sFlex.slice(0,-2)+"l")) {
        if (_oSpellChecker.isValid(sFlex.slice(0,-2)+"l")) {
            aSugg.add(sFlex.slice(0,-2)+"l");
        }
        if (_oDict.isValid(sFlex.slice(0,-2)+"il")) {
        if (_oSpellChecker.isValid(sFlex.slice(0,-2)+"il")) {
            aSugg.add(sFlex.slice(0,-2)+"il");
        }
    }
    if (_oDict.isValid(sFlex.slice(0,-1))) {
    if (_oSpellChecker.isValid(sFlex.slice(0,-1))) {
        aSugg.add(sFlex.slice(0,-1));
    }
    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

Modified gc_lang/fr/modules-js/lexicographe.js from [be510450a4] to [e3263a5103].

[hunk: old lines 222-237 / new lines 222-237]

    ['<', "inférieur à"],
    ['>', "supérieur à"],
]);


class Lexicographe {

    constructor (oDict, oTokenizer, oLocGraph) {
        this.oDict = oDict;
    constructor (oSpellChecker, oTokenizer, oLocGraph) {
        this.oSpellChecker = oSpellChecker;
        this.oTokenizer = oTokenizer;
        this.oLocGraph = JSON.parse(oLocGraph);

        this._zPartDemForm = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(là|ci)$", "i");
        this._aPartDemExceptList = new Set(["celui", "celle", "ceux", "celles", "de", "jusque", "par", "marie-couche-toi"]);
        this._zInterroVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(t-(?:il|elle|on)|je|tu|ils?|elles?|on|[nv]ous)$", "i");
        this._zImperatifVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|y|en|[mts][’'](?:y|en)|les?|la|[mt]oi|leur|lui)$", "i");

[hunk: old lines 337-351 / new lines 337-351]

                            sValue: oToken.sValue,
                            aLabel: ["forme verbale interrogative"],
                            aSubElem: [
                                { sType: oToken.sType, sValue: m[1],       aLabel: this._getMorph(m[1]) },
                                { sType: oToken.sType, sValue: "-" + m[2], aLabel: [this._formatSuffix(m[2].toLowerCase())] }
                            ]
                        };
                    } else if (this.oDict.isValidToken(oToken.sValue)) {
                    } else if (this.oSpellChecker.isValidToken(oToken.sValue)) {
                        return {
                            sType: oToken.sType,
                            sValue: oToken.sValue,
                            aLabel: this._getMorph(oToken.sValue)
                        };
                    } else {
                        return {

[hunk: old lines 360-374 / new lines 360-374]

            helpers.logerror(e);
        }
        return null;
    }

    _getMorph (sWord) {
        let aElem = [];
        for (let s of this.oDict.getMorph(sWord)) {
        for (let s of this.oSpellChecker.getMorph(sWord)) {
            if (s.includes(":")) aElem.push(this._formatTags(s));
        }
        if (aElem.length == 0) {
            aElem.push("mot inconnu du dictionnaire");
        }
        return aElem;
    }

Modified gc_lang/fr/modules/gce_suggestions.py from [50fbeb414d] to [79835965e4].

[hunk: old lines 155-194 / new lines 155-194]

        if sGender == ":m":
            return suggMasPlur(sFlex)
        elif sGender == ":f":
            return suggFemPlur(sFlex)
    aSugg = set()
    if "-" not in sFlex:
        if sFlex.endswith("l"):
            if sFlex.endswith("al") and len(sFlex) > 2 and _oDict.isValid(sFlex[:-1]+"ux"):
            if sFlex.endswith("al") and len(sFlex) > 2 and _oSpellChecker.isValid(sFlex[:-1]+"ux"):
                aSugg.add(sFlex[:-1]+"ux")
            if sFlex.endswith("ail") and len(sFlex) > 3 and _oDict.isValid(sFlex[:-2]+"ux"):
            if sFlex.endswith("ail") and len(sFlex) > 3 and _oSpellChecker.isValid(sFlex[:-2]+"ux"):
                aSugg.add(sFlex[:-2]+"ux")
        if _oDict.isValid(sFlex+"s"):
        if _oSpellChecker.isValid(sFlex+"s"):
            aSugg.add(sFlex+"s")
        if _oDict.isValid(sFlex+"x"):
        if _oSpellChecker.isValid(sFlex+"x"):
            aSugg.add(sFlex+"x")
    if mfsp.hasMiscPlural(sFlex):
        aSugg.update(mfsp.getMiscPlural(sFlex))
    if aSugg:
        return "|".join(aSugg)
    return ""


def suggSing (sFlex):
    "returns singular forms assuming sFlex is plural"
    if "-" in sFlex:
        return ""
    aSugg = set()
    if sFlex.endswith("ux"):
        if _oDict.isValid(sFlex[:-2]+"l"):
        if _oSpellChecker.isValid(sFlex[:-2]+"l"):
            aSugg.add(sFlex[:-2]+"l")
        if _oDict.isValid(sFlex[:-2]+"il"):
        if _oSpellChecker.isValid(sFlex[:-2]+"il"):
            aSugg.add(sFlex[:-2]+"il")
    if _oDict.isValid(sFlex[:-1]):
    if _oSpellChecker.isValid(sFlex[:-1]):
        aSugg.add(sFlex[:-1])
    if aSugg:
        return "|".join(aSugg)
    return ""


def suggMasSing (sFlex, bSuggSimil=False):

Modified gc_lang/fr/modules/lexicographe.py from [7b36598d08] to [75ede82f17].

[hunk: old lines 153-168 / new lines 153-168]

    "t'en": " (te) pronom personnel objet + (en) pronom adverbial",
    "s'en": " (se) pronom personnel objet + (en) pronom adverbial",
}


class Lexicographe:

    def __init__ (self, oDict):
        self.oDict = oDict
    def __init__ (self, oSpellChecker):
        self.oSpellChecker = oSpellChecker
        self._zElidedPrefix = re.compile("(?i)^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)")
        self._zCompoundWord = re.compile("(?i)(\\w+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$")
        self._zTag = re.compile("[:;/][\\w*][^:;/]*")

    def analyzeWord (self, sWord):
        try:
            if not sWord:

[hunk: old lines 179-193 / new lines 179-193]

                sWord = m.group(2)
                aMorph.append( "{}’ : {}".format(m.group(1), _dPFX.get(m.group(1).lower(), "[?]")) )
            # mots composés
            m2 = self._zCompoundWord.match(sWord)
            if m2:
                sWord = m2.group(1)
            # Morphologies
            lMorph = self.oDict.getMorph(sWord)
            lMorph = self.oSpellChecker.getMorph(sWord)
            if len(lMorph) > 1:
                # sublist
                aMorph.append( (sWord, [ self.formatTags(s)  for s in lMorph  if ":" in s ]) )
            elif len(lMorph) == 1:
                aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
            else:
                aMorph.append( "{} :  inconnu du dictionnaire".format(sWord) )

Modified gc_lang/fr/oxt/ContextMenu/ContextMenu.py from [3b33016da8] to [512c45de75].

[hunk: old lines 8-28 / new lines 8-28]


from com.sun.star.task import XJob
from com.sun.star.ui import XContextMenuInterceptor
#from com.sun.star.ui.ContextMenuInterceptorAction import IGNORED
#from com.sun.star.ui.ContextMenuInterceptorAction import EXECUTE_MODIFIED

import grammalecte.fr.lexicographe as lxg
from grammalecte.ibdawg import IBDAWG
from grammalecte.echo import echo
from grammalecte.graphspell.spellchecker import SpellChecker
from grammalecte.graphspell.echo import echo
import helpers


xDesktop = None
oDict = None
oSpellChecker = None
oLexicographe = None


class MyContextMenuInterceptor (XContextMenuInterceptor, unohelper.Base):
    def __init__ (self, ctx):
        self.ctx = ctx

[hunk: old lines 115-143 / new lines 115-143]

        return xCursor.String.strip('.')


class JobExecutor (XJob, unohelper.Base):
    def __init__ (self, ctx):
        self.ctx = ctx
        global xDesktop
        global oDict
        global oSpellChecker
        global oLexicographe
        try:
            if not xDesktop:
                xDesktop = self.ctx.getServiceManager().createInstanceWithContext('com.sun.star.frame.Desktop', self.ctx)
            if not oDict:
            if not oSpellChecker:
                xCurCtx = uno.getComponentContext()
                oGC = self.ctx.ServiceManager.createInstanceWithContext("org.openoffice.comp.pyuno.Lightproof.grammalecte", self.ctx)
                if hasattr(oGC, "getDictionary"):
                if hasattr(oGC, "getSpellChecker"):
                    # https://bugs.documentfoundation.org/show_bug.cgi?id=97790
                    oDict = oGC.getDictionary()
                    oSpellChecker = oGC.getSpellChecker()
                else:
                    oDict = IBDAWG("French.bdic")
                    oSpellChecker = SpellChecker("${lang}", "${dic_filename}.bdic")
            if not oLexicographe:
                oLexicographe = lxg.Lexicographe(oDict)
                oLexicographe = lxg.Lexicographe(oSpellChecker)
        except:
            traceback.print_exc()
        
    def execute (self, args):
        if not args:
            return
        try:

Modified gc_lang/fr/webext/background.js from [f7965949be] to [1d15c00f44].

[hunk: old lines 58-71 / new lines 58-74]

                }
                browser.runtime.sendMessage(e.data);
                break;
            case "setOptions":
            case "setOption":
                storeGCOptions(result);
                break;
            case "setDictionary":
                console.log("[background] " + sActionDone + ": " + result);
                break;
            default:
                console.log("[background] Unknown command: " + sActionDone);
                console.log(e.data);
        }
    }
    catch (e) {
        showError(e);

[hunk: old lines 85-107 / new lines 88-131]

    let dOptions = (dSavedOptions.hasOwnProperty("gc_options")) ? dSavedOptions.gc_options : null;
    xGCEWorker.postMessage({
        sCommand: "init",
        dParam: {sExtensionPath: browser.extension.getURL(""), dOptions: dOptions, sContext: "Firefox"},
        dInfo: {}
    });
}

function setSpellingDictionary (dSavedDictionary) {
    if (dSavedDictionary.hasOwnProperty("oExtendedDictionary")) {
        xGCEWorker.postMessage({
            sCommand: "setDictionary",
            dParam: { sType: "extended", oDict: dSavedDictionary["oExtendedDictionary"] },
            dInfo: {}
        });
    }
    else if (dSavedDictionary.hasOwnProperty("oPersonalDictionary")) {
        xGCEWorker.postMessage({
            sCommand: "setDictionary",
            dParam: { sType: "personal", oDict: dSavedDictionary["oPersonalDictionary"] },
            dInfo: {}
        });
    }
}

function init () {
    if (bChrome) {
        browser.storage.local.get("gc_options", initGrammarChecker);
        browser.storage.local.get("ui_options", initUIOptions);
        browser.storage.local.get("oExtendedDictionary", setSpellingDictionary);
        browser.storage.local.get("oPersonalDictionary", setSpellingDictionary);
        return;
    }
    browser.storage.local.get("gc_options").then(initGrammarChecker, showError);
    browser.storage.local.get("ui_options").then(initUIOptions, showError);
    browser.storage.local.get("oExtendedDictionary").then(setSpellingDictionary, showError);
    browser.storage.local.get("oPersonalDictionary").then(setSpellingDictionary, showError);
}

init();


browser.runtime.onInstalled.addListener(function (oDetails) {
    // launched at installation or update

[hunk: old lines 131-144 / new lines 155-169]

        case "getOptions":
        case "getDefaultOptions":
        case "setOptions":
        case "setOption":
        case "resetOptions":
        case "textToTest":
        case "fullTests":
        case "setDictionary":
            xGCEWorker.postMessage(oRequest);
            break;
        case "openURL":
            browser.tabs.create({url: dParam.sURL});
            break;
        default:
            console.log("[background] Unknown command: " + sCommand);
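
The background script persists dictionaries in browser.storage.local and merely forwards setDictionary requests to the worker. A condensed sketch of the round trip as it appears in this branch (oJSON is the binary-JSON dictionary built by the lexicon editor shown further down), not part of the check-in:

    browser.storage.local.set({ "oPersonalDictionary": oJSON });   // persisted, reread by init() at the next start-up
    browser.runtime.sendMessage({
        sCommand: "setDictionary",
        dParam: { sType: "personal", oDict: oJSON },
        dInfo: {}
    });
    // the background script posts the request unchanged to the worker,
    // where setDictionary() calls oSpellChecker.setPersonalDictionary(oDict)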

Modified gc_lang/fr/webext/gce_worker.js from [2a64cce6de] to [c20f81d8f3].

[hunk: old lines 32-45 / new lines 32-46]

//console.log("[Worker] GC Engine Worker [start]");
//console.log(self);

importScripts("grammalecte/graphspell/helpers.js");
importScripts("grammalecte/graphspell/str_transform.js");
importScripts("grammalecte/graphspell/char_player.js");
importScripts("grammalecte/graphspell/ibdawg.js");
importScripts("grammalecte/graphspell/spellchecker.js");
importScripts("grammalecte/text.js");
importScripts("grammalecte/graphspell/tokenizer.js");
importScripts("grammalecte/fr/conj.js");
importScripts("grammalecte/fr/mfsp.js");
importScripts("grammalecte/fr/phonet.js");
importScripts("grammalecte/fr/cregex.js");
importScripts("grammalecte/fr/gc_options.js");

[hunk: old lines 116-146 / new lines 117-150]

            break;
        case "textToTest":
            textToTest(dParam.sText, dParam.sCountry, dParam.bDebug, dParam.bContext, dInfo);
            break;
        case "fullTests":
            fullTests(dInfo);
            break;
        case "setDictionary":
            setDictionary(dParam.sType, dParam.oDict, dInfo);
            break;
        case "getSpellSuggestions":
            getSpellSuggestions(dParam.sWord, dInfo);
            break;
        case "getListOfTokens":
            getListOfTokens(dParam.sText, dInfo);
            break;
        default:
            console.log("[Worker] Unknown command: " + sCommand);
            showData(e.data);
    }
}



let bInitDone = false;

let oDict = null;
let oSpellChecker = null;
let oTokenizer = null;
let oLxg = null;
let oTest = null;
let oLocution = null;


/*

[hunk: old lines 157-177 / new lines 161-181]

        if (!bInitDone) {
            //console.log("[Worker] Loading… Extension path: " + sExtensionPath);
            conj.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/conj_data.json"));
            phonet.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/phonet_data.json"));
            mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
            //console.log("[Worker] Modules have been initialized…");
            gc_engine.load(sContext, sExtensionPath+"grammalecte/graphspell/_dictionaries");
            oDict = gc_engine.getDictionary();
            oSpellChecker = gc_engine.getSpellChecker();
            oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
            oTokenizer = new Tokenizer("fr");

            oLocution =  helpers.loadFile(sExtensionPath + "/grammalecte/fr/locutions_data.json");

            oLxg = new Lexicographe(oDict, oTokenizer, oLocution);
            oLxg = new Lexicographe(oSpellChecker, oTokenizer, oLocution);
            if (dOptions !== null) {
                gc_engine.setOptions(dOptions);
            }
            //tests();
            bInitDone = true;
        } else {
            console.log("[Worker] Already initialized…")

[hunk: old lines 196-220 / new lines 200-224]

}

function parseAndSpellcheck (sText, sCountry, bDebug, bContext, dInfo={}) {
    let i = 0;
    sText = sText.replace(/­/g, "").normalize("NFC");
    for (let sParagraph of text.getParagraph(sText)) {
        let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
        postMessage(createResponse("parseAndSpellcheck", {sParagraph: sParagraph, iParaNum: i, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, false));
        i += 1;
    }
    postMessage(createResponse("parseAndSpellcheck", null, dInfo, true));
}

function parseAndSpellcheck1 (sParagraph, sCountry, bDebug, bContext, dInfo={}) {
    sParagraph = sParagraph.replace(/­/g, "").normalize("NFC");
    let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
    postMessage(createResponse("parseAndSpellcheck1", {sParagraph: sParagraph, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, true));
}

function getOptions (dInfo={}) {
    postMessage(createResponse("getOptions", gc_engine.getOptions(), dInfo, true));
}

[hunk: old lines 284-306 / new lines 288-336]

        console.log(sRes);
    }
    gc_engine.setOptions(dMemoOptions);
    postMessage(createResponse("fullTests", sMsg, dInfo, true));
}


// Spellchecker
// SpellChecker

function setDictionary (sType, oDict, dInfo) {
    if (!oSpellChecker) {
        postMessage(createResponse("setDictionary", "# Error. SpellChecker not loaded.", dInfo, true));
        return;
    }
    switch (sType) {
        case "main":
            oSpellChecker.setMainDictionary(oDict);
            postMessage(createResponse("setDictionary", true, dInfo, true));
            break;
        case "extended":
            oSpellChecker.setExtendedDictionary(oDict);
            postMessage(createResponse("setDictionary", true, dInfo, true));
            break;
        case "personal":
            oSpellChecker.setPersonalDictionary(oDict);
            postMessage(createResponse("setDictionary", true, dInfo, true));
            break;
        default:
            console.log("[worker] setDictionary: Unknown command");
    }
}

function getSpellSuggestions (sWord, dInfo) {
    if (!oDict) {
        postMessage(createResponse("getSpellSuggestions", "# Error. Dictionary not loaded.", dInfo, true));
    if (!oSpellChecker) {
        postMessage(createResponse("getSpellSuggestions", "# Error. SpellChecker not loaded.", dInfo, true));
        return;
    }
    let i = 1;
    let aSugg = oDict.suggest(sWord);
    postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg}, dInfo, true));
    for (let aSugg of oSpellChecker.suggest(sWord)) {
        postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg, iSugg: i}, dInfo, true));
        i += 1;
    }
}
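
Note that getSpellSuggestions now posts one message per list yielded by oSpellChecker.suggest() (main, then extended, then personal dictionary) instead of a single list from oDict.suggest(). A caller that wants one flat list can concatenate them; a minimal sketch, not part of the check-in:

    let lAllSugg = [];
    for (let lSugg of oSpellChecker.suggest("bonjour")) {   // up to three lists of suggestions
        lAllSugg.push(...lSugg);
    }
    console.log(lAllSugg.join(" | "));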


// Lexicographer

function getListOfTokens (sText, dInfo={}) {
    try {

Modified gc_lang/fr/webext/panel/lex_editor.js from [629a2b4b0e] to [7139f8d80b].

[hunk: old lines 568-625 / new lines 568-626]

const oBinaryDict = {
    
    oIBDAWG: null,

    load: function () {
        if (bChrome) {
            browser.storage.local.get("oDictionary", this._load);
            browser.storage.local.get("oPersonalDictionary", this._load);
            return;
        }
        let xPromise = browser.storage.local.get("oDictionary");
        let xPromise = browser.storage.local.get("oPersonalDictionary");
        xPromise.then(this._load.bind(this), showError);
    },

    _load: function (oResult) {
        if (!oResult.hasOwnProperty("oDictionary")) {
        if (!oResult.hasOwnProperty("oPersonalDictionary")) {
            oWidgets.hideElement("export_button");
            return;
        }
        let oJSON = oResult.oDictionary;
        let oJSON = oResult.oPersonalDictionary;
        this.oIBDAWG = new IBDAWG(oJSON);
        let lEntry = [];
        for (let s of this.oIBDAWG.select()) {
            lEntry.push(s.split("\t"));
        }        
        oLexicon.set(lEntry);
        oWidgets.setDictData(this.oIBDAWG.nEntry, this.oIBDAWG.sDate);
        oWidgets.showElement("export_button");
    },

    build: function (lEntry) {
        oWidgets.showElement("build_progress");
        let xProgressNode = document.getElementById("build_progress");
        let oDAWG = new DAWG(lEntry, "S", "fr", "Français", "Dictionnaire personnel", xProgressNode);
        let oJSON = oDAWG.createBinaryJSON(1);
        this.save(oJSON);
        this.oIBDAWG = new IBDAWG(oJSON);
        oWidgets.setDictData(this.oIBDAWG.nEntry, this.oIBDAWG.sDate);
        oWidgets.hideElement("build_progress");
        oWidgets.showElement("export_button");
        browser.runtime.sendMessage({ sCommand: "setDictionary", dParam: {sType: "personal", oDict: oJSON}, dInfo: {} });
    },

    save: function (oJSON) {
        browser.storage.local.set({ "oDictionary": oJSON });
        browser.storage.local.set({ "oPersonalDictionary": oJSON });
    },

    import: function () {
        // TO DO
    },

    export: function () {
        let xBlob = new Blob([ JSON.stringify(this.oIBDAWG.getJSON()) ], {type: 'application/json'}); 
        let sURL = URL.createObjectURL(xBlob);
        browser.downloads.download({ filename: "grammalecte_dictionnaire_personnel.json", url: sURL, saveAs: true });
        browser.downloads.download({ filename: "fr.personal.json", url: sURL, saveAs: true });
    }
}

oBinaryDict.load();
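
The build path of the editable personal dictionary is thus: list of entries → DAWG → binary JSON → local storage + worker. Condensed sketch of that pipeline, using only the calls visible above (lEntry and xProgressNode as in oBinaryDict.build), not part of the check-in:

    let oDAWG = new DAWG(lEntry, "S", "fr", "Français", "Dictionnaire personnel", xProgressNode);
    let oJSON = oDAWG.createBinaryJSON(1);                           // the binary-JSON format IBDAWG consumes
    let oIBDAWG = new IBDAWG(oJSON);                                 // usable immediately in the editor
    browser.storage.local.set({ "oPersonalDictionary": oJSON });     // persisted
    browser.runtime.sendMessage({ sCommand: "setDictionary", dParam: { sType: "personal", oDict: oJSON }, dInfo: {} });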

Modified grammalecte-cli.py from [07800caa2b] to [d66b04b565].

[hunk: old lines 40-74 / new lines 40-76]

    if sys.platform == "win32":
        # Apparently, the console transforms «’» in «'».
        # So we reverse it to avoid many useless warnings.
        sText = sText.replace("'", "’")
    return sText


def _getErrors (sText, oTokenizer, oDict, bContext=False, bSpellSugg=False, bDebug=False):
def _getErrors (sText, oTokenizer, oSpellChecker, bContext=False, bSpellSugg=False, bDebug=False):
    "returns a tuple: (grammar errors, spelling errors)"
    aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
            if bSpellSugg:
                dToken['aSuggestions'] = oDict.suggest(dToken['sValue'])
                dToken['aSuggestions'] = []
                for lSugg in oSpellChecker.suggest(dToken['sValue']):
                    dToken['aSuggestions'].extend(lSugg)
            aSpellErrs.append(dToken)
    return aGrammErrs, aSpellErrs


def generateText (sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bSpellSugg, bDebug)
def generateText (sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, False, bSpellSugg, bDebug)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)


def generateJSON (iIndex, sText, oTokenizer, oDict, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, bContext, bSpellSugg, bDebug)
def generateJSON (iIndex, sText, oTokenizer, oSpellChecker, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, bContext, bSpellSugg, bDebug)
    aGrammErrs = list(aGrammErrs)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    if lLineSet:
        aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
        return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    if bReturnText:

[hunk: old lines 126-159 / new lines 128-161]

    xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
    xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
    xArgs = xParser.parse_args()

    gce.load()
    if not xArgs.json:
        echo("Grammalecte v{}".format(gce.version))
    oDict = gce.getDictionary()
    oSpellChecker = gce.getSpellChecker()
    oTokenizer = tkz.Tokenizer("fr")
    oLexGraphe = lxg.Lexicographe(oDict)
    oLexGraphe = lxg.Lexicographe(oSpellChecker)
    if xArgs.textformatter or xArgs.textformatteronly:
        oTF = tf.TextFormatter()

    if xArgs.list_options or xArgs.list_rules:
        if xArgs.list_options:
            gce.displayOptions("fr")
        if xArgs.list_rules:
            gce.displayRules(None  if xArgs.list_rules == "*"  else xArgs.list_rules)
        exit()

    if xArgs.suggest:
        lSugg = oDict.suggest(xArgs.suggest)
        if xArgs.json:
            sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
        else:
            sText = "Suggestions : " + " | ".join(lSugg)
        echo(sText)
        for lSugg in oSpellChecker.suggest(xArgs.suggest):
            if xArgs.json:
                sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
            else:
                sText = "Suggestions : " + " | ".join(lSugg)
            echo(sText)
        exit()

    if not xArgs.json:
        xArgs.context = False

    gce.setOptions({"html": True, "latex": True})
    if xArgs.opt_on:

[hunk: old lines 177-237 / new lines 179-239]

            for i, sText in enumerate(readfile(sFile), 1):
                if xArgs.textformatter or xArgs.textformatteronly:
                    sText = oTF.formatText(sText)
                if xArgs.textformatteronly:
                    output(sText, hDst)
                else:
                    if xArgs.json:
                        sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
                        sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
                    else:
                        sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                        sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                    if sText:
                        if xArgs.json and bComma:
                            output(",\n", hDst)
                        output(sText, hDst)
                        bComma = True
                if hDst:
                    echo("§ %d\r" % i, end="", flush=True)
        else:
            # concaténation des lignes non séparées par une ligne vide
            for i, lLine in enumerate(readfileAndConcatLines(sFile), 1):
                sText, lLineSet = txt.createParagraphWithLines(lLine)
                if xArgs.json:
                    sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
                    sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
                else:
                    sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                    sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                if sText:
                    if xArgs.json and bComma:
                        output(",\n", hDst)
                    output(sText, hDst)
                    bComma = True
                if hDst:
                    echo("§ %d\r" % i, end="", flush=True)
        if xArgs.json:
            output("\n]}\n", hDst)
    else:
        # pseudo-console
        sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n"
        sText = _getText(sInputText)
        while True:
            if sText.startswith("?"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo("* " + sWord)
                        for sMorph in oDict.getMorph(sWord):
                        for sMorph in oSpellChecker.getMorph(sWord):
                            echo("  {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))
            elif sText.startswith("!"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo(" | ".join(oDict.suggest(sWord)))
                        #echo(" | ".join(oDict.suggest2(sWord)))
                        for lSugg in oSpellChecker.suggest(sWord):
                            echo(" | ".join(lSugg))
            elif sText.startswith(">"):
                oDict.drawPath(sText[1:].strip())
                oSpellChecker.drawPath(sText[1:].strip())
            elif sText.startswith("="):
                for sRes in oDict.select(sText[1:].strip()):
                for sRes in oSpellChecker.select(sText[1:].strip()):
                    echo(sRes)
            elif sText.startswith("/+ "):
                gce.setOptions({ opt:True  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                echo("done")
            elif sText.startswith("/- "):
                gce.setOptions({ opt:False  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                echo("done")

[hunk: old lines 262-278 / new lines 264-280]



            elif sText.startswith("/rl"):
                # reload (todo)
                pass
            else:
                for sParagraph in txt.getParagraph(sText):
                    if xArgs.textformatter:
                        sText = oTF.formatText(sText)
                    sRes = generateText(sText, oTokenizer, oDict, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
                    sRes = generateText(sText, oTokenizer, oSpellChecker, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
                    if sRes:
                        echo("\n" + sRes)
                    else:
                        echo("\nNo error found.")
            sText = _getText(sInputText)


if __name__ == '__main__':
    main()

Modified grammalecte-server.py from [6dbdf10c60] to [3253a2b2ff].

[hunk: old lines 125-161 / new lines 125-161]

def genUserId ():
    i = 0
    while True:
        yield str(i)
        i += 1


def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False):
def parseParagraph (iParagraph, sText, oTokenizer, oSpellChecker, dOptions, bDebug=False, bEmptyIfNoErrors=False):
    aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions)
    aGrammErrs = list(aGrammErrs)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
            aSpellErrs.append(dToken)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return "  " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    

if __name__ == '__main__':

    gce.load("Server")
    echo("Grammalecte v{}".format(gce.version))
    dServerOptions = getServerOptions()
    dGCOptions = getConfigOptions("fr")
    if dGCOptions:
        gce.setOptions(dGCOptions)
    dServerGCOptions = gce.getOptions()
    echo("Grammar options:\n" + " | ".join([ k + ": " + str(v)  for k, v in sorted(dServerGCOptions.items()) ]))
    oDict = gce.getDictionary()
    oSpellChecker = gce.getSpellChecker()
    oTokenizer = tkz.Tokenizer("fr")
    oTF = tf.TextFormatter()
    dUser = {}
    userGenerator = genUserId()

    app = Bottle()

[hunk: old lines 195-209 / new lines 195-209]

                dOptions.update(json.loads(request.forms.options))
            except:
                sError = "request options not used"
        sJSON = '{ "program": "grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n'
        for i, sText in enumerate(txt.getParagraph(request.forms.text), 1):
            if bTF:
                sText = oTF.formatText(sText)
            sText = parseParagraph(i, sText, oTokenizer, oDict, dOptions, bEmptyIfNoErrors=True)
            sText = parseParagraph(i, sText, oTokenizer, oSpellChecker, dOptions, bEmptyIfNoErrors=True)
            if sText:
                if bComma:
                    sJSON += ",\n"
                sJSON += sText
                bComma = True
        sJSON += "\n]}\n"
        return sJSON

Modified graphspell-js/ibdawg.js from [cea88b04de] to [6c0fce95e9].

[hunk: old lines 85-100 / new lines 85-99]

    // INDEXABLE BINARY DIRECT ACYCLIC WORD GRAPH

    constructor (param1, sPath="") {
        // param1 can be a filename or a object with all the necessary data.
        try {
            let oData = null;
            if (typeof(param1) == "string") {
                let sDicName = param1;
                let sURL = (sPath !== "") ? sPath + "/" + sDicName : "resource://grammalecte/graphspell/_dictionaries/"+sDicName;
                let sURL = (sPath !== "") ? sPath + "/" + param1 : "resource://grammalecte/graphspell/_dictionaries/"+param1;
                oData = JSON.parse(helpers.loadFile(sURL));
            } else {
                oData = param1;
            }
            Object.assign(this, oData);
        }
        catch (e) {

[hunk: old lines 279-295 / new lines 278-294]

        return Boolean(this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask);
    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let l = this.morph(sWord);
        if (sWord[0].gl_isUpperCase()) {
            l = l.concat(this.morph(sWord.toLowerCase()));
            l.push(...this.morph(sWord.toLowerCase()));
            if (sWord.gl_isUpperCase() && sWord.length > 1) {
                l = l.concat(this.morph(sWord.gl_toCapitalize()));
                l.push(...this.morph(sWord.gl_toCapitalize()));
            }
        }
        return l;
    }
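
    // Note (not part of the check-in): "l = l.concat(x)" allocates a new array and rebinds l,
    // while "l.push(...x)" appends the same elements to the existing array in place;
    // e.g. [1, 2].concat([3, 4]) returns a new [1, 2, 3, 4], whereas l.push(...[3, 4]) grows l itself.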

    suggest (sWord, nSuggLimit=10) {
        // returns a array of suggestions for <sWord>

Modified graphspell-js/spellchecker.js from [ee0b03faf8] to [567dbb14f5].

[hunk: old lines 21-148 / new lines 21-146]

const dDefaultDictionaries = new Map([
    ["fr", "fr.json"],
    ["en", "en.json"]
]);


class Spellchecker {
class SpellChecker {

    constructor (sLangCode, mainDic=null, extentedDic=null, personalDic=null, sPath="") {
    constructor (sLangCode, sPath="", mainDic=null, extentedDic=null, personalDic=null) {
        // returns true if the main dictionary is loaded
        this.sLangCode = sLangCode;
        console.log(sLangCode);
        console.log(mainDic);
        if (mainDic === null) {
            mainDic = dDefaultDictionaries.gl_get(sLangCode, "");
        }
        this.oMainDic = this._loadDictionary(mainDic, sPath, true);
        this.oExtendedDic = this._loadDictionary(extentedDic, sPath);
        this.oPersonalDic = this._loadDictionary(personalDic, sPath);
    }

    _loadDictionary (dictionary, sPath, bNecessary=false) {
        // returns an IBDAWG object
        if (dictionary === null) {
            return null;
        }
        try {
        	if (typeof(require) !== 'undefined') {
            if (typeof(require) !== 'undefined') {
        		console.log(">>>> <resource:>");
                return new ibdawg.IBDAWG(dictionary);  // dictionary can be a filename or a JSON object
            } else {
            	console.log(">>>> no <resource:>");
                return new IBDAWG(dictionary, sPath);  // dictionary can be a filename or a JSON object
            }
        }
        catch (e) {
            let sfDictionary = (typeof(dictionary) == "string") ? dictionary : dictionary.sLangName + "/" + dictionary.sFileName;
        	if (bNecessary) {
        		throw e.message;
        	}
            if (bNecessary) {
                throw "Error: <" + sfDictionary + "> not loaded. " + e.message;
            }
            console.log("Error: <" + sfDictionary + "> not loaded.")
            console.log(e.message);
            return null;
        }
    }

    setMainDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oMainDic = this._loadDictionary(dictionary);
        return bool(this.oMainDic);
        return Boolean(this.oMainDic);
    }

    setExtendedDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oExtendedDic = this._loadDictionary(dictionary);
        return bool(this.oExtendedDic);
        return Boolean(this.oExtendedDic);
    }

    setPersonalDictionary (dictionary) {
        // returns true if the dictionary is loaded
        this.oPersonalDic = this._loadDictionary(dictionary);
        return bool(this.oPersonalDic);
        return Boolean(this.oPersonalDic);
    }

    // IBDAWG functions

    isValidToken (sToken) {
        // checks if sToken is valid (if there is hyphens in sToken, sToken is split, each part is checked)
        if (this.oMainDic.isValidToken(sToken)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.isValidToken(sToken)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.isValidToken(sToken)) {
            return true;
        }
        return false;
    }

    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (this.oMainDic.isValid(sToken)) {
        if (this.oMainDic.isValid(sWord)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.isValid(sToken)) {
        if (this.oExtendedDic && this.oExtendedDic.isValid(sWord)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.isValid(sToken)) {
        if (this.oPersonalDic && this.oPersonalDic.isValid(sWord)) {
            return true;
        }
        return false;
    }

    lookup (sWord) {
        // checks if sWord is in dictionary as is (strict verification)
        if (this.oMainDic.lookup(sToken)) {
        if (this.oMainDic.lookup(sWord)) {
            return true;
        }
        if (this.oExtendedDic && this.oExtendedDic.lookup(sToken)) {
        if (this.oExtendedDic && this.oExtendedDic.lookup(sWord)) {
            return true;
        }
        if (this.oPersonalDic && this.oPersonalDic.lookup(sToken)) {
        if (this.oPersonalDic && this.oPersonalDic.lookup(sWord)) {
            return true;
        }
        return false;
    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let lResult = this.oMainDic.getMorph(sToken);
        let lResult = this.oMainDic.getMorph(sWord);
        if (this.oExtendedDic) {
            lResult.extends(this.oExtendedDic.getMorph(sToken));
            lResult.push(...this.oExtendedDic.getMorph(sWord));
        }
        if (this.oPersonalDic) {
            lResult.extends(this.oPersonalDic.getMorph(sToken));
            lResult.push(...this.oPersonalDic.getMorph(sWord));
        }
        return lResult;
    }

    * suggest (sWord, nSuggLimit=10) {
        // generator: returns 1,2 or 3 lists of suggestions
        // generator: returns 1, 2 or 3 lists of suggestions
        yield this.oMainDic.suggest(sWord, nSuggLimit);
        if (this.oExtendedDic) {
            yield this.oExtendedDic.suggest(sWord, nSuggLimit);
        }
        if (this.oPersonalDic) {
            yield this.oPersonalDic.suggest(sWord, nSuggLimit);
        }

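In both the JavaScript and the Python wrapper, suggest() is a generator rather than a flat list: it yields one list of suggestions per loaded dictionary (main, then extended, then personal when they exist). A caller that wants a single list can simply chain the yields; a minimal sketch, assuming an already-initialized checker (the helper name is illustrative):

def suggestAll(oSpellChecker, sWord, nSuggLimit=10):
    # flatten the one-to-three lists yielded by suggest() into a single list
    lFlat = []
    for lSugg in oSpellChecker.suggest(sWord, nSuggLimit):
        lFlat.extend(lSugg)
    return lFlat
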
Modified graphspell-js/tokenizer.js from [d6429837c4] to [c3f0ee8c90].

                }
            }
            i += nCut;
            sText = sText.slice(nCut);
        }
    }

    getSpellingErrors (sText, oDict) {
    getSpellingErrors (sText, oSpellChecker) {
        let aSpellErr = [];
        for (let oToken of this.genTokens(sText)) {
            if (oToken.sType === 'WORD' && !oDict.isValidToken(oToken.sValue)) {
            if (oToken.sType === 'WORD' && !oSpellChecker.isValidToken(oToken.sValue)) {
                aSpellErr.push(oToken);
            }
        }
        return aSpellErr;
    }
}


if (typeof(exports) !== 'undefined') {
    exports.Tokenizer = Tokenizer;
}

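The tokenizer change is only a parameter rename: getSpellingErrors now receives the SpellChecker wrapper instead of a bare IBDAWG, and the filter itself is untouched — keep every WORD token the checker rejects. The same filter in Python, assuming tokens are dicts with "sType" and "sValue" keys as the JS tokenizer yields (names are illustrative):

def getSpellingErrors(lTokens, oSpellChecker):
    # keep every WORD token that no loaded dictionary accepts
    return [dToken for dToken in lTokens
            if dToken["sType"] == "WORD" and not oSpellChecker.isValidToken(dToken["sValue"])]
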
Modified graphspell/ibdawg.py from [6527ad55bf] to [3bf18d8144].

        self.nNode = int(l.pop(0))
        self.nArc = int(l.pop(0))
        self.nAff = int(l.pop(0))
        self.cStemming = l.pop(0)
        self.nTag = self.nArcVal - self.nChar - self.nAff
        # <dChar> to get the value of an arc, <dCharVal> to get the char of an arc with its value
        self.dChar = {}
        for i in range(1, self.nChar):
        for i in range(1, self.nChar+1):
            self.dChar[self.lArcVal[i]] = i
        self.dCharVal = { v: k  for k, v in self.dChar.items() }
        self.nBytesOffset = 1 # version 3

    def _initJSON (self):
        "initialize with a JSON text file"
        self.__dict__.update(json.loads(self.by.decode("utf-8")))

Modified graphspell/spellchecker.py from [19c7dd4df2] to [638f8d8cdf].

dDefaultDictionaries = {
    "fr": "fr.bdic",
    "en": "en.bdic"
}


class Spellchecker ():
class SpellChecker ():

    def __init__ (self, sLangCode, sfMainDic="", sfExtendedDic="", sfPersonalDic=""):
        "returns True if the main dictionary is loaded"
        self.sLangCode = sLangCode
        if not sfMainDic:
            sfMainDic = dDefaultDictionaries.get(sLangCode, "")
        self.oMainDic = self._loadDictionary(sfMainDic)
        self.oMainDic = self._loadDictionary(sfMainDic, True)
        self.oExtendedDic = self._loadDictionary(sfExtendedDic)
        self.oPersonalDic = self._loadDictionary(sfPersonalDic)
        return bool(self.oMainDic)

    def _loadDictionary (self, sfDictionary):
    def _loadDictionary (self, sfDictionary, bNecessary=False):
        "returns an IBDAWG object"
        if not sfDictionary:
            return None
        try:
            return ibdawg.IBDAWG(sfDictionary)
        except:
            print("Error: <" + sDicName + "> not loaded.")
        except Exception as e:
            if bNecessary:
                raise Exception(str(e), "Error: <" + sfDictionary + "> not loaded.")
            print("Error: <" + sfDictionary + "> not loaded.")
            traceback.print_exc()
            return None

    def setMainDictionary (self, sfDictionary):
        "returns True if the dictionary is loaded"
        self.oMainDic = self._loadDictionary(sfDictionary)
        return bool(self.oMainDic)
            return True
        if self.oPersonalDic and self.oPersonalDic.isValidToken(sToken):
            return True
        return False

    def isValid (self, sWord):
        "checks if sWord is valid (different casing tested if the first letter is a capital)"
        if self.oMainDic.isValid(sToken):
        if self.oMainDic.isValid(sWord):
            return True
        if self.oExtendedDic and self.oExtendedDic.isValid(sToken):
        if self.oExtendedDic and self.oExtendedDic.isValid(sWord):
            return True
        if self.oPersonalDic and self.oPersonalDic.isValid(sToken):
        if self.oPersonalDic and self.oPersonalDic.isValid(sWord):
            return True
        return False

    def lookup (self, sWord):
        "checks if sWord is in dictionary as is (strict verification)"
        if self.oMainDic.lookup(sToken):
        if self.oMainDic.lookup(sWord):
            return True
        if self.oExtendedDic and self.oExtendedDic.lookup(sToken):
        if self.oExtendedDic and self.oExtendedDic.lookup(sWord):
            return True
        if self.oPersonalDic and self.oPersonalDic.lookup(sToken):
        if self.oPersonalDic and self.oPersonalDic.lookup(sWord):
            return True
        return False

    def getMorph (self, sWord):
        "retrieves morphologies list, different casing allowed"
        lResult = self.oMainDic.getMorph(sToken)
        lResult = self.oMainDic.getMorph(sWord)
        if self.oExtendedDic:
            lResult.extends(self.oExtendedDic.getMorph(sToken))
            lResult.extend(self.oExtendedDic.getMorph(sWord))
        if self.oPersonalDic:
            lResult.extends(self.oPersonalDic.getMorph(sToken))
            lResult.extend(self.oPersonalDic.getMorph(sWord))
        return lResult

    def suggest (self, sWord, nSuggLimit=10):
        "generator: returns 1,2 or 3 lists of suggestions"
        "generator: returns 1, 2 or 3 lists of suggestions"
        yield self.oMainDic.suggest(sWord, nSuggLimit)
        if self.oExtendedDic:
            yield self.oExtendedDic.suggest(sWord, nSuggLimit)
        if self.oPersonalDic:
            yield self.oPersonalDic.suggest(sWord, nSuggLimit)

    def select (self, sPattern=""):
        "generator: returns all entries which morphology fits <sPattern>"
        yield from self.oMainDic.select(sPattern)
        if self.oExtendedDic:
            yield from self.oExtendedDic.select(sPattern)
        if self.oPersonalDic:
            yield from self.oPersonalDic.select(sPattern)

    def drawPath (self, sWord):
        self.oMainDic.drawPath(sWord)
        if self.oExtendedDic:
            print("-----")
            self.oExtendedDic.drawPath(sWord)
        if self.oPersonalDic:
            print("-----")
            self.oPersonalDic.drawPath(sWord)

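Taken together, the Python SpellChecker answers isValid, lookup, getMorph and suggest against every loaded dictionary, so a word only has to be known by one of them. One caveat: as committed here, __init__ ends with return bool(self.oMainDic), which Python rejects at instantiation time (TypeError: __init__() should return None), so the sketch below assumes an oSpellChecker instance obtained once that return is dropped; the personal dictionary filename is a placeholder:

oSpellChecker.setPersonalDictionary("my_words.bdic")     # placeholder filename
print(oSpellChecker.isValid("Grammalecte"))              # True if any loaded dictionary accepts it
print(oSpellChecker.getMorph("chante"))                  # morphologies gathered from every dictionary
for lSugg in oSpellChecker.suggest("ortografe"):
    print(lSugg)                                         # one list per dictionary
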
Modified make.py from [b310b82ca7] to [6a76dc767a].

def createOXT (spLang, dVars, dOxt, spLangPack, bInstall):
    "create extension for Writer"
    print("Building extension for Writer")
    spfZip = "_build/" + dVars['name'] + "-"+ dVars['lang'] +"-v" + dVars['version'] + '.oxt'
    hZip = zipfile.ZipFile(spfZip, mode='w', compression=zipfile.ZIP_DEFLATED)

    # Package and parser
    copyGrammalectePyPackageInZipFile(hZip, spLangPack, dVars['dic_filename']+".bdic", "pythonpath/")
    copyGrammalectePyPackageInZipFile(hZip, spLangPack, "pythonpath/")
    hZip.write("grammalecte-cli.py", "pythonpath/grammalecte-cli.py")

    # Extension files
    hZip.writestr("META-INF/manifest.xml", helpers.fileFile("gc_core/py/oxt/manifest.xml", dVars))
    hZip.writestr("description.xml", helpers.fileFile("gc_core/py/oxt/description.xml", dVars))
    hZip.writestr("Linguistic.xcu", helpers.fileFile("gc_core/py/oxt/Linguistic.xcu", dVars))
    hZip.writestr("Grammalecte.py", helpers.fileFile("gc_core/py/oxt/Grammalecte.py", dVars))

        hDst.write("html = 1\n")


def createPackageZip (sLang, dVars, spLangPack):
    "create server zip"
    spfZip = "_build/" + dVars['name'] + "-"+ dVars['lang'] +"-v" + dVars['version'] + '.zip'
    hZip = zipfile.ZipFile(spfZip, mode='w', compression=zipfile.ZIP_DEFLATED)
    copyGrammalectePyPackageInZipFile(hZip, spLangPack, dVars['dic_filename']+".bdic")
    copyGrammalectePyPackageInZipFile(hZip, spLangPack)
    for spf in ["grammalecte-cli.py", "grammalecte-server.py", "bottle.py", \
                "grammalecte-server-options._global.ini", "grammalecte-server-options."+sLang+".ini", \
                "README.txt", "LICENSE.txt", "LICENSE.fr.txt"]:
        hZip.write(spf)
    hZip.writestr("setup.py", helpers.fileFile("gc_lang/fr/setup.py", dVars))


def copyGrammalectePyPackageInZipFile (hZip, spLangPack, sfDict, sAddPath=""):
def copyGrammalectePyPackageInZipFile (hZip, spLangPack, sAddPath=""):
    for sf in os.listdir("grammalecte"):
        if not os.path.isdir("grammalecte/"+sf):
            hZip.write("grammalecte/"+sf, sAddPath+"grammalecte/"+sf)
    for sf in os.listdir("grammalecte/graphspell"):
        if not os.path.isdir("grammalecte/graphspell/"+sf):
            hZip.write("grammalecte/graphspell/"+sf, sAddPath+"grammalecte/graphspell/"+sf)
    for sf in os.listdir("grammalecte/graphspell/_dictionaries"):
    hZip.write("grammalecte/graphspell/_dictionaries/"+sfDict, sAddPath+"grammalecte/graphspell/_dictionaries/"+sfDict)
        if not os.path.isdir("grammalecte/graphspell/_dictionaries/"+sf):
            hZip.write("grammalecte/graphspell/_dictionaries/"+sf, sAddPath+"grammalecte/graphspell/_dictionaries/"+sf)
    for sf in os.listdir(spLangPack):
        if not os.path.isdir(spLangPack+"/"+sf):
            hZip.write(spLangPack+"/"+sf, sAddPath+spLangPack+"/"+sf)


def create (sLang, xConfig, bInstallOXT, bJavaScript):
    oNow = datetime.datetime.now()
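
copyGrammalectePyPackageInZipFile no longer takes a dictionary filename: it simply zips every regular file it finds, including the whole grammalecte/graphspell/_dictionaries folder. The walk-and-write pattern it repeats is just this (stand-alone illustration, not the build code itself):

import os
import zipfile

def zipFlatFolder(hZip, spFolder, sAddPath=""):
    # write every regular file of <spFolder> into the archive under <sAddPath>
    for sf in os.listdir(spFolder):
        if not os.path.isdir(spFolder + "/" + sf):
            hZip.write(spFolder + "/" + sf, sAddPath + spFolder + "/" + sf)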

            dVars[sf[:-3]] = open("js_extension/"+sf, "r", encoding="utf-8").read()
        for sf in os.listdir("graphspell-js"):
            if not os.path.isdir("graphspell-js/"+sf):
                file_util.copy_file("graphspell-js/"+sf, "grammalecte-js/graphspell")
                helpers.copyAndFileTemplate("graphspell-js/"+sf, "grammalecte-js/graphspell/"+sf, dVars)


def copyGraphspellDictionary (dVars, bJavaScript=False):
    spfPyDic = "graphspell/_dictionaries/"+dVars['dic_filename']+".bdic"
    spfJSDic = "graphspell-js/_dictionaries/"+dVars['dic_filename']+".json"
    if not os.path.isfile(spfPyDic) or (bJavaScript and not os.path.isfile(spfJSDic)):
        buildDictionary(dVars, bJavaScript)
    file_util.copy_file(spfPyDic, "grammalecte/graphspell/_dictionaries")
def copyGraphspellDictionaries (dVars, bJavaScript=False, bExtendedDict=False, bPersonalDict=False):
    dVars["dic_main_filename_py"] = ""
    dVars["dic_main_filename_js"] = ""
    dVars["dic_extended_filename_py"] = ""
    dVars["dic_extended_filename_js"] = ""
    dVars["dic_personal_filename_py"] = ""
    dVars["dic_personal_filename_js"] = ""
    lDict = [ ("main", dVars['dic_filename']) ]
    if bExtendedDict:
        lDict.append(("extended", dVars['dic_extended_filename']))
    if bPersonalDict:
        lDict.append(("personal", dVars['dic_personal_filename']))
    for sType, sFileName in lDict:
        spfPyDic = "graphspell/_dictionaries/" + sFileName + ".bdic"
        spfJSDic = "graphspell-js/_dictionaries/" + sFileName + ".json"
        if not os.path.isfile(spfPyDic) or (bJavaScript and not os.path.isfile(spfJSDic)):
            buildDictionary(dVars, sType, bJavaScript)
        print(spfPyDic)
        file_util.copy_file(spfPyDic, "grammalecte/graphspell/_dictionaries")
    file_util.copy_file(spfPyDic[:-5]+".info.txt", "grammalecte/graphspell/_dictionaries")
    if bJavaScript:
        file_util.copy_file(spfJSDic, "grammalecte-js/graphspell/_dictionaries")
        dVars['dic_'+sType+'_filename_py'] = sFileName + '.bdic'
        if bJavaScript:
            file_util.copy_file(spfJSDic, "grammalecte-js/graphspell/_dictionaries")
            dVars['dic_'+sType+'_filename_js'] = sFileName + '.json'


def buildDictionary (dVars, bJavaScript):
    lex_build.build(dVars['lexicon_src'], dVars['lang'], dVars['lang_name'], dVars['dic_filename'], \
                    bJavaScript, dVars['dic_name'], dVars['stemming_method'], int(dVars['fsa_method']))
def buildDictionary (dVars, sType, bJavaScript=False):
    if sType == "main":
        spfLexSrc = dVars['lexicon_src']
        sfDictDst = dVars['dic_filename']
        sDicName = dVars['dic_name']
    elif sType == "extended":
        spfLexSrc = dVars['lexicon_extended_src']
        sfDictDst = dVars['dic_extended_filename']
        sDicName = dVars['dic_extended_name']
    elif sType == "personal":
        spfLexSrc = dVars['lexicon_personal_src']
        sfDictDst = dVars['dic_personal_filename']
        sDicName = dVars['dic_personal_name']
    lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, dVars['stemming_method'], int(dVars['fsa_method']))



def main ():
    print("Python: " + sys.version)
    xParser = argparse.ArgumentParser()
    xParser.add_argument("lang", type=str, nargs='+', help="lang project to generate (name of folder in /lang)")
    xParser.add_argument("-b", "--build_data", help="launch build_data.py (part 1 and 2)", action="store_true")
    xParser.add_argument("-bb", "--build_data_before", help="launch build_data.py (only part 1: before dictionary building)", action="store_true")
    xParser.add_argument("-ba", "--build_data_after", help="launch build_data.py (only part 2: before dictionary building)", action="store_true")
    xParser.add_argument("-d", "--dict", help="generate FSA dictionary", action="store_true")
    xParser.add_argument("-t", "--tests", help="run unit tests", action="store_true")
    xParser.add_argument("-p", "--perf", help="run performance tests", action="store_true")
    xParser.add_argument("-pm", "--perf_memo", help="run performance tests and store results in perf_memo.txt", action="store_true")
    xParser.add_argument("-js", "--javascript", help="JavaScript build for Firefox", action="store_true")
    xParser.add_argument("-aed", "--add_extended_dictionary", help="add extended dictionary to the build", action="store_true")
    xParser.add_argument("-apd", "--add_personal_dictionary", help="add personal dictionary to the build", action="store_true")
    xParser.add_argument("-fx", "--firefox", help="Launch Firefox Developper for WebExtension testing", action="store_true")
    xParser.add_argument("-we", "--web_ext", help="Launch Firefox Nightly for WebExtension testing", action="store_true")
    xParser.add_argument("-tb", "--thunderbird", help="Launch Thunderbird", action="store_true")
    xParser.add_argument("-i", "--install", help="install the extension in Writer (path of unopkg must be set in config.ini)", action="store_true")
    xArgs = xParser.parse_args()

    if xArgs.build_data:
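
copyGraphspellDictionaries records, for each dictionary it actually packaged, a filename under dic_<type>_filename_py (and _js when the JavaScript build is on), so templated files know exactly what shipped; types that were not built keep an empty string. After a build such as make.py fr -d -js -aed -apd, dVars would roughly contain (values are illustrative placeholders taken from the language config):

dVars_after = {
    "dic_main_filename_py":     "fr.bdic",
    "dic_main_filename_js":     "fr.json",
    "dic_extended_filename_py": "fr.extended.bdic",
    "dic_extended_filename_js": "fr.extended.json",
    "dic_personal_filename_py": "fr.personal.bdic",
    "dic_personal_filename_js": "fr.personal.json",
}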

    copyGraphspellCore(xArgs.javascript)

    for sLang in xArgs.lang:
        if os.path.exists("gc_lang/"+sLang) and os.path.isdir("gc_lang/"+sLang):
            xConfig = getConfig(sLang)
            dVars = xConfig._sections['args']

            if not dVars["lexicon_extended_src"]:
                xArgs.add_extended_dictionary = False
            if not dVars["lexicon_personal_src"]:
                xArgs.add_personal_dictionary = False

            # build data
            build_data_module = None
            if xArgs.build_data_before or xArgs.build_data_after:
                # lang data
                try:
                    build_data_module = importlib.import_module("gc_lang."+sLang+".build_data")
                except ImportError:
                    print("# Error. Couldn’t import file build_data.py in folder gc_lang/"+sLang)
            if build_data_module and xArgs.build_data_before:
                build_data_module.before('gc_lang/'+sLang, dVars, xArgs.javascript)
            if xArgs.dict:
                buildDictionary(dVars, xArgs.javascript)
                buildDictionary(dVars, "main", xArgs.javascript)
                if xArgs.add_extended_dictionary:
                    buildDictionary(dVars, "extended", xArgs.javascript)
                if xArgs.add_personal_dictionary:
                    buildDictionary(dVars, "personal", xArgs.javascript)
            if build_data_module and xArgs.build_data_after:
                build_data_module.after('gc_lang/'+sLang, dVars, xArgs.javascript)

            # copy dictionaries from Graphspell
            copyGraphspellDictionary(dVars, xArgs.javascript)
            copyGraphspellDictionaries(dVars, xArgs.javascript, xArgs.add_extended_dictionary, xArgs.add_personal_dictionary)

            # make
            sVersion = create(sLang, xConfig, xArgs.install, xArgs.javascript, )

            # tests
            if xArgs.tests or xArgs.perf or xArgs.perf_memo:
                print("> Running tests")

Modified misc/wconsole.bat from [8f10aedb6b] to [e648fe9833].

Rem Create console for LibreOffice at launch

"C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\editbin.exe" /subsystem:console "C:\Program Files\LibreOffice 5\program\soffice.exe"
"C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\editbin.exe" /subsystem:console "C:\Program Files\LibreOffice 5\program\soffice.bin"
"C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\editbin.exe" /subsystem:console "C:\Program Files\LibreOffice\program\soffice.exe"
"C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\editbin.exe" /subsystem:console "C:\Program Files\LibreOffice\program\soffice.bin"

Rem "C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\editbin.exe" /subsystem:console "C:\cygwin64\home\Z7\bibisect-win32-5.3\instdir\program\soffice.exe"
Rem "C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\editbin.exe" /subsystem:console "C:\cygwin64\home\Z7\bibisect-win32-5.3\instdir\program\soffice.bin"