Grammalecte: Check-in [18db5d65f0]

Overview
Comment: [core][cli][server][graphspell][fx] use spellchecker instead of ibdawg
SHA3-256: 18db5d65f059826efb9bcdcfbdeb764561471eb3b69fb11dd4b4b7d301097d5c
User & Date: olr on 2018-02-13 15:44:31
Context
2018-02-13
15:57  [cli] suggest() in spellchecker is a generator (check-in: 966babe645, user: olr, tags: cli, multid)
15:44  [core][cli][server][graphspell][fx] use spellchecker instead of ibdawg (check-in: 18db5d65f0, user: olr, tags: cli, core, server, fx, graphspell, multid)
14:26  [core] use spellchecker instead of ibdawg directly (check-in: 62304c0cd5, user: olr, tags: core, multid)
Changes

Modified gc_core/js/lang_core/gc_engine.js from [dcb651b444] to [454f9a423d].

Lines 331-345:
             _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
         }
         catch (e) {
             helpers.logerror(e);
         }
     },
 
-    getDictionary: function () {
+    getSpellChecker: function () {
         return _oSpellChecker;
     },
 
     //// Options
 
     setOption: function (sOpt, bVal) {
         if (_dOptions.has(sOpt)) {

Lines 640-653:
     exports._rewrite = gc_engine._rewrite;
     exports.ignoreRule = gc_engine.ignoreRule;
     exports.resetIgnoreRules = gc_engine.resetIgnoreRules;
     exports.reactivateRule = gc_engine.reactivateRule;
     exports.listRules = gc_engine.listRules;
     exports._getRules = gc_engine._getRules;
     exports.load = gc_engine.load;
-    exports.getDictionary = gc_engine.getDictionary;
+    exports.getSpellChecker = gc_engine.getSpellChecker;
     exports.setOption = gc_engine.setOption;
     exports.setOptions = gc_engine.setOptions;
     exports.getOptions = gc_engine.getOptions;
     exports.getDefaultOptions = gc_engine.getDefaultOptions;
     exports.resetOptions = gc_engine.resetOptions;
 }

Modified gc_core/py/lang_core/gc_engine.py from [e36972dc16] to [eded256052].

Lines 10-24:
 from ..graphspell.spellchecker import SpellChecker
 from ..graphspell.echo import echo
 from . import gc_options
 
 
 __all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
-            "load", "parse", "getDictionary", \
+            "load", "parse", "getSpellChecker", \
             "setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
             "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules" ]
 
 __version__ = "${version}"
 
 
 lang = "${lang}"

Lines 329-343:
 def resetOptions ():
     global _dOptions
     _dOptions = dict(gc_options.getOptions(_sAppContext))
 
 
-def getDictionary ():
+def getSpellChecker ():
     return _oSpellChecker
 
 
 def _getRules (bParagraph):
     try:
         if not bParagraph:
             return _rules.lSentenceRules
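
The accessor rename is the whole API change here: callers now receive the SpellChecker façade instead of the raw IBDAWG object, while the query surface they rely on (isValidToken, getMorph, suggest) stays the same. A minimal caller-side sketch; the import path follows the gce alias visible in grammalecte-cli.py below and is an assumption, not part of this diff:

    # Caller-side migration sketch (module path assumed from the CLI's aliases).
    import grammalecte.fr as gce

    gce.load()
    # oDict = gce.getDictionary()          # before this check-in
    oSpellChecker = gce.getSpellChecker()  # after: SpellChecker wrapping IBDAWG

    # Same query surface as before:
    print(oSpellChecker.isValidToken("maison"))   # True if the word is known
    print(oSpellChecker.getMorph("maison"))       # list of morphology tags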

Modified gc_lang/fr/modules-js/lexicographe.js from [be510450a4] to [e3263a5103].

Lines 222-237:
     ['<', "inférieur à"],
     ['>', "supérieur à"],
 ]);
 
 
 class Lexicographe {
 
-    constructor (oDict, oTokenizer, oLocGraph) {
-        this.oDict = oDict;
+    constructor (oSpellChecker, oTokenizer, oLocGraph) {
+        this.oSpellChecker = oSpellChecker;
         this.oTokenizer = oTokenizer;
         this.oLocGraph = JSON.parse(oLocGraph);
 
         this._zPartDemForm = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(là|ci)$", "i");
         this._aPartDemExceptList = new Set(["celui", "celle", "ceux", "celles", "de", "jusque", "par", "marie-couche-toi"]);
         this._zInterroVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(t-(?:il|elle|on)|je|tu|ils?|elles?|on|[nv]ous)$", "i");
         this._zImperatifVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|y|en|[mts][’'](?:y|en)|les?|la|[mt]oi|leur|lui)$", "i");

Lines 337-351:
                             sValue: oToken.sValue,
                             aLabel: ["forme verbale interrogative"],
                             aSubElem: [
                                 { sType: oToken.sType, sValue: m[1],       aLabel: this._getMorph(m[1]) },
                                 { sType: oToken.sType, sValue: "-" + m[2], aLabel: [this._formatSuffix(m[2].toLowerCase())] }
                             ]
                         };
-                    } else if (this.oDict.isValidToken(oToken.sValue)) {
+                    } else if (this.oSpellChecker.isValidToken(oToken.sValue)) {
                         return {
                             sType: oToken.sType,
                             sValue: oToken.sValue,
                             aLabel: this._getMorph(oToken.sValue)
                         };
                     } else {
                         return {

Lines 360-374:
             helpers.logerror(e);
         }
         return null;
     }
 
     _getMorph (sWord) {
         let aElem = [];
-        for (let s of this.oDict.getMorph(sWord)) {
+        for (let s of this.oSpellChecker.getMorph(sWord)) {
             if (s.includes(":")) aElem.push(this._formatTags(s));
         }
         if (aElem.length == 0) {
             aElem.push("mot inconnu du dictionnaire");
         }
         return aElem;
     }

Modified gc_lang/fr/modules/lexicographe.py from [7b36598d08] to [75ede82f17].

Lines 153-168:
     "t'en": " (te) pronom personnel objet + (en) pronom adverbial",
     "s'en": " (se) pronom personnel objet + (en) pronom adverbial",
 }
 
 
 class Lexicographe:
 
-    def __init__ (self, oDict):
-        self.oDict = oDict
+    def __init__ (self, oSpellChecker):
+        self.oSpellChecker = oSpellChecker
         self._zElidedPrefix = re.compile("(?i)^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)")
         self._zCompoundWord = re.compile("(?i)(\\w+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$")
         self._zTag = re.compile("[:;/][\\w*][^:;/]*")
 
     def analyzeWord (self, sWord):
         try:
             if not sWord:

Lines 179-193:
                 sWord = m.group(2)
                 aMorph.append( "{}’ : {}".format(m.group(1), _dPFX.get(m.group(1).lower(), "[?]")) )
             # mots composés
             m2 = self._zCompoundWord.match(sWord)
             if m2:
                 sWord = m2.group(1)
             # Morphologies
-            lMorph = self.oDict.getMorph(sWord)
+            lMorph = self.oSpellChecker.getMorph(sWord)
             if len(lMorph) > 1:
                 # sublist
                 aMorph.append( (sWord, [ self.formatTags(s)  for s in lMorph  if ":" in s ]) )
             elif len(lMorph) == 1:
                 aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
             else:
                 aMorph.append( "{} :  inconnu du dictionnaire".format(sWord) )
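
Since the lexicographer only ever queries morphologies, swapping its dictionary argument is mechanical. A sketch of the new wiring, mirroring the interactive "?word" path of grammalecte-cli.py further down; the import paths follow the CLI's aliases (gce, lxg) and are assumptions, not part of this diff:

    # New wiring for the lexicographer (import paths assumed).
    import grammalecte.fr as gce
    import grammalecte.fr.lexicographe as lxg

    gce.load()
    oSpellChecker = gce.getSpellChecker()
    oLexGraphe = lxg.Lexicographe(oSpellChecker)   # was: lxg.Lexicographe(oDict)

    for sMorph in oSpellChecker.getMorph("chante"):
        # formatTags() turns a raw tag string into a human-readable label
        print("  {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))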

Modified gc_lang/fr/webext/gce_worker.js from [30916bedcd] to [fb2b2e5711].

Lines 133-147:
     }
 }
 
 
 
 let bInitDone = false;
 
-let oDict = null;
+let oSpellChecker = null;
 let oTokenizer = null;
 let oLxg = null;
 let oTest = null;
 let oLocution = null;
 
 
 /*

Lines 158-178:
         if (!bInitDone) {
             //console.log("[Worker] Loading… Extension path: " + sExtensionPath);
             conj.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/conj_data.json"));
             phonet.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/phonet_data.json"));
             mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
             //console.log("[Worker] Modules have been initialized…");
             gc_engine.load(sContext, sExtensionPath+"grammalecte/graphspell/_dictionaries");
-            oDict = gc_engine.getDictionary();
+            oSpellChecker = gc_engine.getSpellChecker();
             oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
             oTokenizer = new Tokenizer("fr");
 
             oLocution =  helpers.loadFile(sExtensionPath + "/grammalecte/fr/locutions_data.json");
 
-            oLxg = new Lexicographe(oDict, oTokenizer, oLocution);
+            oLxg = new Lexicographe(oSpellChecker, oTokenizer, oLocution);
             if (dOptions !== null) {
                 gc_engine.setOptions(dOptions);
             }
             //tests();
             bInitDone = true;
         } else {
             console.log("[Worker] Already initialized…")

Lines 197-221:
 }
 
 function parseAndSpellcheck (sText, sCountry, bDebug, bContext, dInfo={}) {
     let i = 0;
     sText = sText.replace(/­/g, "").normalize("NFC");
     for (let sParagraph of text.getParagraph(sText)) {
         let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
-        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
+        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
         postMessage(createResponse("parseAndSpellcheck", {sParagraph: sParagraph, iParaNum: i, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, false));
         i += 1;
     }
     postMessage(createResponse("parseAndSpellcheck", null, dInfo, true));
 }
 
 function parseAndSpellcheck1 (sParagraph, sCountry, bDebug, bContext, dInfo={}) {
     sParagraph = sParagraph.replace(/­/g, "").normalize("NFC");
     let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
-    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
+    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
     postMessage(createResponse("parseAndSpellcheck1", {sParagraph: sParagraph, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, true));
 }
 
 function getOptions (dInfo={}) {
     postMessage(createResponse("getOptions", gc_engine.getOptions(), dInfo, true));
 }

Lines 288-307 (old) / 288-310 (new):
     postMessage(createResponse("fullTests", sMsg, dInfo, true));
 }
 
 
 // Spellchecker
 
 function getSpellSuggestions (sWord, dInfo) {
-    if (!oDict) {
+    if (!oSpellChecker) {
         postMessage(createResponse("getSpellSuggestions", "# Error. Dictionary not loaded.", dInfo, true));
         return;
     }
-    let aSugg = oDict.suggest(sWord);
-    postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg}, dInfo, true));
+    let i = 1;
+    for (let aSugg of oSpellChecker.suggest(sWord)) {
+        postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg, iSugg: i}, dInfo, true));
+        i += 1;
+    }
 }
 
 
 // Lexicographer
 
 function getListOfTokens (sText, dInfo={}) {
     try {
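
Note the protocol change in getSpellSuggestions(): suggest() on the SpellChecker is treated as an iterable of suggestion batches, and the worker now posts one response per batch, numbered by iSugg, instead of a single response (the follow-up check-in 966babe645 brings the CLI in line with this). A Python sketch of the consuming pattern, with a stub standing in for the real SpellChecker:

    # Batch-per-response suggestion protocol, sketched with a stub; the real
    # SpellChecker lives in graphspell and may consult several dictionaries.
    class StubSpellChecker:
        def suggest (self, sWord):
            # one list of suggestions per consulted dictionary (illustrative)
            yield ["maison", "raison"]
            yield ["Masson"]

    oSpellChecker = StubSpellChecker()
    i = 1
    for aSugg in oSpellChecker.suggest("maispn"):
        # gce_worker.js posts one "getSpellSuggestions" message per batch,
        # tagged with the running counter (iSugg)
        print(i, aSugg)
        i += 1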

Modified grammalecte-cli.py from [07800caa2b] to [93945e25e2].

Lines 40-74:
     if sys.platform == "win32":
         # Apparently, the console transforms «’» in «'».
         # So we reverse it to avoid many useless warnings.
         sText = sText.replace("'", "’")
     return sText
 
 
-def _getErrors (sText, oTokenizer, oDict, bContext=False, bSpellSugg=False, bDebug=False):
+def _getErrors (sText, oTokenizer, oSpellChecker, bContext=False, bSpellSugg=False, bDebug=False):
     "returns a tuple: (grammar errors, spelling errors)"
     aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext)
     aSpellErrs = []
     for dToken in oTokenizer.genTokens(sText):
-        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
+        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
             if bSpellSugg:
-                dToken['aSuggestions'] = oDict.suggest(dToken['sValue'])
+                dToken['aSuggestions'] = oSpellChecker.suggest(dToken['sValue'])
             aSpellErrs.append(dToken)
     return aGrammErrs, aSpellErrs
 
 
-def generateText (sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
-    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bSpellSugg, bDebug)
+def generateText (sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
+    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, False, bSpellSugg, bDebug)
     if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
         return ""
     return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
 
 
-def generateJSON (iIndex, sText, oTokenizer, oDict, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
-    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, bContext, bSpellSugg, bDebug)
+def generateJSON (iIndex, sText, oTokenizer, oSpellChecker, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
+    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, bContext, bSpellSugg, bDebug)
     aGrammErrs = list(aGrammErrs)
     if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
         return ""
     if lLineSet:
         aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
         return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
     if bReturnText:

Lines 126-154:
     xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
     xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
     xArgs = xParser.parse_args()
 
     gce.load()
     if not xArgs.json:
         echo("Grammalecte v{}".format(gce.version))
-    oDict = gce.getDictionary()
+    oSpellChecker = gce.getSpellChecker()
     oTokenizer = tkz.Tokenizer("fr")
-    oLexGraphe = lxg.Lexicographe(oDict)
+    oLexGraphe = lxg.Lexicographe(oSpellChecker)
     if xArgs.textformatter or xArgs.textformatteronly:
         oTF = tf.TextFormatter()
 
     if xArgs.list_options or xArgs.list_rules:
         if xArgs.list_options:
             gce.displayOptions("fr")
         if xArgs.list_rules:
             gce.displayRules(None  if xArgs.list_rules == "*"  else xArgs.list_rules)
         exit()
 
     if xArgs.suggest:
-        lSugg = oDict.suggest(xArgs.suggest)
+        lSugg = oSpellChecker.suggest(xArgs.suggest)
         if xArgs.json:
             sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
         else:
             sText = "Suggestions : " + " | ".join(lSugg)
         echo(sText)
         exit()

Lines 177-237:
             for i, sText in enumerate(readfile(sFile), 1):
                 if xArgs.textformatter or xArgs.textformatteronly:
                     sText = oTF.formatText(sText)
                 if xArgs.textformatteronly:
                     output(sText, hDst)
                 else:
                     if xArgs.json:
-                        sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
+                        sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
                     else:
-                        sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
+                        sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                     if sText:
                         if xArgs.json and bComma:
                             output(",\n", hDst)
                         output(sText, hDst)
                         bComma = True
                 if hDst:
                     echo("§ %d\r" % i, end="", flush=True)
         else:
             # concaténation des lignes non séparées par une ligne vide
             for i, lLine in enumerate(readfileAndConcatLines(sFile), 1):
                 sText, lLineSet = txt.createParagraphWithLines(lLine)
                 if xArgs.json:
-                    sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
+                    sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
                 else:
-                    sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
+                    sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
                 if sText:
                     if xArgs.json and bComma:
                         output(",\n", hDst)
                     output(sText, hDst)
                     bComma = True
                 if hDst:
                     echo("§ %d\r" % i, end="", flush=True)
         if xArgs.json:
             output("\n]}\n", hDst)
     else:
         # pseudo-console
         sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n"
         sText = _getText(sInputText)
         while True:
             if sText.startswith("?"):
                 for sWord in sText[1:].strip().split():
                     if sWord:
                         echo("* " + sWord)
-                        for sMorph in oDict.getMorph(sWord):
+                        for sMorph in oSpellChecker.getMorph(sWord):
                             echo("  {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))
             elif sText.startswith("!"):
                 for sWord in sText[1:].strip().split():
                     if sWord:
-                        echo(" | ".join(oDict.suggest(sWord)))
-                        #echo(" | ".join(oDict.suggest2(sWord)))
+                        echo(" | ".join(oSpellChecker.suggest(sWord)))
+                        #echo(" | ".join(oSpellChecker.suggest2(sWord)))
             elif sText.startswith(">"):
-                oDict.drawPath(sText[1:].strip())
+                oSpellChecker.drawPath(sText[1:].strip())
             elif sText.startswith("="):
-                for sRes in oDict.select(sText[1:].strip()):
+                for sRes in oSpellChecker.select(sText[1:].strip()):
                     echo(sRes)
             elif sText.startswith("/+ "):
                 gce.setOptions({ opt:True  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                 echo("done")
             elif sText.startswith("/- "):
                 gce.setOptions({ opt:False  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
                 echo("done")

Lines 262-278:
             elif sText.startswith("/rl"):
                 # reload (todo)
                 pass
             else:
                 for sParagraph in txt.getParagraph(sText):
                     if xArgs.textformatter:
                         sText = oTF.formatText(sText)
-                    sRes = generateText(sText, oTokenizer, oDict, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
+                    sRes = generateText(sText, oTokenizer, oSpellChecker, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
                     if sRes:
                         echo("\n" + sRes)
                     else:
                         echo("\nNo error found.")
             sText = _getText(sInputText)
 
 
 if __name__ == '__main__':
     main()
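
Throughout the CLI (and in tokenizer.getSpellingErrors() further down), the spell checker is only duck-typed: any object exposing isValidToken() and suggest() fits. A self-contained sketch of the contract _getErrors() relies on, with a stub in place of graphspell's SpellChecker (the stub and its word list are illustrative only):

    # Duck-typed contract used by _getErrors(); StubSpellChecker is illustrative.
    class StubSpellChecker:
        def __init__ (self, lWords):
            self._aWords = set(lWords)
        def isValidToken (self, sToken):
            return sToken.lower() in self._aWords
        def suggest (self, sToken):
            return [ w  for w in self._aWords  if w[:2] == sToken[:2].lower() ]

    def findSpellingErrors (lTokens, oSpellChecker, bSpellSugg=False):
        "same loop shape as _getErrors(): keep WORD tokens the checker rejects"
        aSpellErrs = []
        for dToken in lTokens:
            if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
                if bSpellSugg:
                    dToken['aSuggestions'] = oSpellChecker.suggest(dToken['sValue'])
                aSpellErrs.append(dToken)
        return aSpellErrs

    oSpellChecker = StubSpellChecker(["chat", "chien", "maison"])
    lTokens = [ {"sType": "WORD", "sValue": "chein"}, {"sType": "WORD", "sValue": "chat"} ]
    print(findSpellingErrors(lTokens, oSpellChecker, bSpellSugg=True))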

Modified grammalecte-server.py from [6dbdf10c60] to [3253a2b2ff].

Lines 125-161:
 def genUserId ():
     i = 0
     while True:
         yield str(i)
         i += 1
 
 
-def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False):
+def parseParagraph (iParagraph, sText, oTokenizer, oSpellChecker, dOptions, bDebug=False, bEmptyIfNoErrors=False):
     aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions)
     aGrammErrs = list(aGrammErrs)
     aSpellErrs = []
     for dToken in oTokenizer.genTokens(sText):
-        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
+        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
             aSpellErrs.append(dToken)
     if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
         return ""
     return "  " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
 
 
 if __name__ == '__main__':
 
     gce.load("Server")
     echo("Grammalecte v{}".format(gce.version))
     dServerOptions = getServerOptions()
     dGCOptions = getConfigOptions("fr")
     if dGCOptions:
         gce.setOptions(dGCOptions)
     dServerGCOptions = gce.getOptions()
     echo("Grammar options:\n" + " | ".join([ k + ": " + str(v)  for k, v in sorted(dServerGCOptions.items()) ]))
-    oDict = gce.getDictionary()
+    oSpellChecker = gce.getSpellChecker()
     oTokenizer = tkz.Tokenizer("fr")
     oTF = tf.TextFormatter()
     dUser = {}
     userGenerator = genUserId()
 
     app = Bottle()
 
Lines 195-209:
                 dOptions.update(json.loads(request.forms.options))
             except:
                 sError = "request options not used"
         sJSON = '{ "program": "grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n'
         for i, sText in enumerate(txt.getParagraph(request.forms.text), 1):
             if bTF:
                 sText = oTF.formatText(sText)
-            sText = parseParagraph(i, sText, oTokenizer, oDict, dOptions, bEmptyIfNoErrors=True)
+            sText = parseParagraph(i, sText, oTokenizer, oSpellChecker, dOptions, bEmptyIfNoErrors=True)
             if sText:
                 if bComma:
                     sJSON += ",\n"
                 sJSON += sText
                 bComma = True
         sJSON += "\n]}\n"
         return sJSON
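
The handler streams one JSON fragment per paragraph that has errors and wraps them in a fixed envelope. A sketch of the resulting payload shape, rebuilt from the string literals in this hunk (the version string and error values are illustrative):

    # Envelope produced by the handler above, reconstructed from its literals.
    import json

    def buildResponse (lParagraphResults, sVersion="x.y", sError=""):
        sJSON = '{ "program": "grammalecte-fr", "version": "' + sVersion + '", "lang": "fr", "error": "' + sError + '", "data" : [\n'
        sJSON += ",\n".join( "  " + json.dumps(dParagraph, ensure_ascii=False)  for dParagraph in lParagraphResults )
        sJSON += "\n]}\n"
        return sJSON

    print(buildResponse([ { "iParagraph": 1, "lGrammarErrors": [], "lSpellingErrors": [{"sValue": "chein"}] } ]))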

Modified graphspell-js/tokenizer.js from [d6429837c4] to [c3f0ee8c90].

Lines 84-101:
                 }
             }
             i += nCut;
             sText = sText.slice(nCut);
         }
     }
 
-    getSpellingErrors (sText, oDict) {
+    getSpellingErrors (sText, oSpellChecker) {
         let aSpellErr = [];
         for (let oToken of this.genTokens(sText)) {
-            if (oToken.sType === 'WORD' && !oDict.isValidToken(oToken.sValue)) {
+            if (oToken.sType === 'WORD' && !oSpellChecker.isValidToken(oToken.sValue)) {
                 aSpellErr.push(oToken);
             }
         }
         return aSpellErr;
     }
 }