Grammalecte: Changes On Branch d0f7053d0ff79ff3

Changes In Branch webext2 Through [d0f7053d0f] Excluding Merge-Ins

This is equivalent to a diff from 6af1e457f4 to d0f7053d0f.

2017-08-06
13:27
[build] don’t use Firefox nightly for Addon-SDK tests -> developer edition check-in: 97544c27f2 user: olr tags: build, webext2
12:22
[core][fr][js] merge webext2_fix: code cleaning + bug fixes + remove Array comprehensions (for compatibility) check-in: d0f7053d0f user: olr tags: fr, core, webext2
12:14
[core][js] set echo as console.log if no require available check-in: 7cc552ce5a user: olr tags: core, webext2_fix
2017-08-05
16:01
[fx] test with Nightly again check-in: 578863e008 user: olr tags: fx, webext2
2017-08-01
09:50
[core][py] variable renaming check-in: 1b98af1338 user: olr tags: trunk, core
2017-07-31
15:31
[core][js] helpers as object check-in: 1140b979f8 user: olr tags: core, webext2
07:52
[core][js] text: remove useless functions check-in: 6af1e457f4 user: olr tags: trunk, core
07:44
[core][js][fr] JavaScript sucks: avoid weird and unpredictable behavior, infinite loops and similar craziness -> stable number of groups in regex check-in: 20ec5b88a4 user: olr tags: trunk, core, warning

Modified gc_core/js/helpers.js from [da0905c944] to [8fee48eb89].

Old version (lines 1-98):

// HELPERS

"use strict";

// In Firefox, there is no console.log in PromiseWorker, but there is worker.log.
// In Thunderbird, you can’t access the console directly, so a log function must be passed in.
let funcOutput = null;



function setLogOutput (func) {
    funcOutput = func;
}

function echo (obj) {
    if (funcOutput !== null) {
        funcOutput(obj);
    } else {
        console.log(obj);
    }
    return true;
}

function logerror (e, bStack=false) {
    let sMsg = "\n" + e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message;
    if (bStack) {
        sMsg += "\n--- Stack ---\n" + e.stack;
    }
    if (funcOutput !== null) {
        funcOutput(sMsg);
    } else {
        console.error(sMsg);
    }
}

function inspect (o) {
    let sMsg = "__inspect__: " + typeof o;
    for (let sParam in o) {
        sMsg += "\n" + sParam + ": " + o[sParam];
    }
    sMsg += "\n" + JSON.stringify(o) + "\n__end__";
    echo(sMsg);
}


// load resources in workers (suggested by Mozilla extension reviewers)
// for more options have a look here: https://gist.github.com/Noitidart/ec1e6b9a593ec7e3efed
// if not in workers, use sdk/data.load() instead
function loadFile (spf) {
    try {
        let xRequest;
        if (typeof XMLHttpRequest !== "undefined") {
            xRequest = new XMLHttpRequest();
        }
        else {
            // JS bullshit again… necessary for Thunderbird
            let { Cc, Ci } = require("chrome");
            xRequest = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"].createInstance();
            xRequest.QueryInterface(Ci.nsIXMLHttpRequest);
        }
        xRequest.open('GET', spf, false); // 3rd arg is false for synchronous, sync is acceptable in workers
        xRequest.send();
        return xRequest.responseText;
    }
    catch (e) {
        logerror(e);
        return null
    }
}


// conversions
function objectToMap (obj) {
    let m = new Map();
    for (let param in obj) {
        //console.log(param + " " + obj[param]);
        m.set(param, obj[param]);
    }
    return m;
}

function mapToObject (m) {
    let obj = {};
    for (let [k, v] of m) {
        obj[k] = v;
    }
    return obj;
}



if (typeof(exports) !== 'undefined') {
    exports.setLogOutput = setLogOutput;
    exports.echo = echo;
    exports.logerror = logerror;
    exports.inspect = inspect;
    exports.loadFile = loadFile;
    exports.objectToMap = objectToMap;
    exports.mapToObject = mapToObject;
}


New version (lines 1-98):

// HELPERS

"use strict";

// In Firefox, there is no console.log in PromiseWorker, but there is worker.log.
// In Thunderbird, you can’t access the console directly, so a log function must be passed in.
let funcOutput = null;

var helpers = {

    setLogOutput: function (func) {
        funcOutput = func;
    },

    echo: function (obj) {
        if (funcOutput !== null) {
            funcOutput(obj);
        } else {
            console.log(obj);
        }
        return true;
    },

    logerror: function (e, bStack=false) {
        let sMsg = "\n" + e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message;
        if (bStack) {
            sMsg += "\n--- Stack ---\n" + e.stack;
        }
        if (funcOutput !== null) {
            funcOutput(sMsg);
        } else {
            console.error(sMsg);
        }
    },

    inspect: function (o) {
        let sMsg = "__inspect__: " + typeof o;
        for (let sParam in o) {
            sMsg += "\n" + sParam + ": " + o[sParam];
        }
        sMsg += "\n" + JSON.stringify(o) + "\n__end__";
        this.echo(sMsg);
    },

    loadFile: function (spf) {
        // load resources in workers (suggested by Mozilla extension reviewers)
        // for more options have a look here: https://gist.github.com/Noitidart/ec1e6b9a593ec7e3efed
        // if not in workers, use sdk/data.load() instead

        try {
            let xRequest;
            if (typeof XMLHttpRequest !== "undefined") {
                xRequest = new XMLHttpRequest();

            } else {
                // JS bullshit again… necessary for Thunderbird
                let { Cc, Ci } = require("chrome");
                xRequest = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"].createInstance();
                xRequest.QueryInterface(Ci.nsIXMLHttpRequest);
            }
            xRequest.open('GET', spf, false); // 3rd arg is false for synchronous, sync is acceptable in workers
            xRequest.send();
            return xRequest.responseText;
        }
        catch (e) {
            this.logerror(e);
            return null;
        }
    },


    // conversions
    objectToMap: function (obj) {
        let m = new Map();
        for (let param in obj) {
            //console.log(param + " " + obj[param]);
            m.set(param, obj[param]);
        }
        return m;
    },

    mapToObject: function (m) {
        let obj = {};
        for (let [k, v] of m) {
            obj[k] = v;
        }
        return obj;
    }
};


if (typeof(exports) !== 'undefined') {
    exports.setLogOutput = helpers.setLogOutput;
    exports.echo = helpers.echo;
    exports.logerror = helpers.logerror;
    exports.inspect = helpers.inspect;
    exports.loadFile = helpers.loadFile;
    exports.objectToMap = helpers.objectToMap;
    exports.mapToObject = helpers.mapToObject;
}

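For orientation only (not part of the diff above): a minimal sketch of how the refactored helpers object might be used once loaded. The option names are placeholders and dump() merely stands in for whatever logger is available in the host environment.

    // illustrative sketch, not from the repository
    let dOptions = helpers.objectToMap({ "opt1": true, "opt2": false });  // plain object -> Map
    let oBack = helpers.mapToObject(dOptions);                            // Map -> plain object
    helpers.setLogOutput(function (sMsg) { dump(sMsg + "\n"); });         // route echo()/logerror() to a custom logger
    helpers.echo(oBack);                                                  // now goes through the registered function instead of console.log
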
Modified gc_core/js/ibdawg.js from [e748c8288e] to [cdddd20c84].


Old version (lines 1-55):

//// IBDAWG

"use strict";



const st = require("resource://grammalecte/str_transform.js");
const helpers = require("resource://grammalecte/helpers.js");



// String
// Don’t remove. Necessary in TB.
${string}



class IBDAWG {
    // INDEXABLE BINARY DIRECT ACYCLIC WORD GRAPH

    constructor (sDicName) {
        try {

            const dict = JSON.parse(helpers.loadFile("resource://grammalecte/_dictionaries/"+sDicName));
            Object.assign(this, dict);
            //const dict = require("resource://grammalecte/"+sLang+"/dictionary.js");
            //Object.assign(this, dict.dictionary);
        }
        catch (e) {
            throw Error("# Error. File not found or not loadable.\n" + e.message + "\n");
        }
        /*
            Properties:
            sName, nVersion, sHeader, lArcVal, nArcVal, byDic, sLang, nChar, nBytesArc, nBytesNodeAddress,
            nEntries, nNode, nArc, nAff, cStemming, nTag, dChar, _arcMask, _finalNodeMask, _lastArcMask, _addrBitMask, nBytesOffset,
        */
        if (!this.sHeader.startsWith("/pyfsa/")) {
            throw TypeError("# Error. Not a pyfsa binary dictionary. Header: " + this.sHeader);
        }
        if (!(this.nVersion == "1" || this.nVersion == "2" || this.nVersion == "3")) {
            throw RangeError("# Error. Unknown dictionary version: " + this.nVersion);
        }

        this.dChar = helpers.objectToMap(this.dChar);
        //this.byDic = new Uint8Array(this.byDic);  // not quicker, even slower

        if (this.cStemming == "S") {
            this.funcStemming = st.getStemFromSuffixCode;
        } else if (this.cStemming == "A") {
            this.funcStemming = st.getStemFromAffixCode;
        } else {
            this.funcStemming = st.noStemming;
        }

        // Configuring DAWG functions according to nVersion
        switch (this.nVersion) {
            case 1:
                this.morph = this._morph1;
                this.stem = this._stem1;


New version (lines 1-57):

//// IBDAWG

"use strict";


if (typeof(require) !== 'undefined') {
    var str_transform = require("resource://grammalecte/str_transform.js");
    var helpers = require("resource://grammalecte/helpers.js");
}


// String
// Don’t remove. Necessary in TB.
${string}



class IBDAWG {
    // INDEXABLE BINARY DIRECT ACYCLIC WORD GRAPH

    constructor (sDicName, sPath="") {
        try {
            let sURL = (sPath !== "") ? sPath + "/" + sDicName : "resource://grammalecte/_dictionaries/"+sDicName;
            const dict = JSON.parse(helpers.loadFile(sURL));
            Object.assign(this, dict);


        }
        catch (e) {
            throw Error("# Error. File not found or not loadable.\n" + e.message + "\n");
        }
        /*
            Properties:
            sName, nVersion, sHeader, lArcVal, nArcVal, byDic, sLang, nChar, nBytesArc, nBytesNodeAddress,
            nEntries, nNode, nArc, nAff, cStemming, nTag, dChar, _arcMask, _finalNodeMask, _lastArcMask, _addrBitMask, nBytesOffset,
        */
        if (!this.sHeader.startsWith("/pyfsa/")) {
            throw TypeError("# Error. Not a pyfsa binary dictionary. Header: " + this.sHeader);
        }
        if (!(this.nVersion == "1" || this.nVersion == "2" || this.nVersion == "3")) {
            throw RangeError("# Error. Unknown dictionary version: " + this.nVersion);
        }

        this.dChar = helpers.objectToMap(this.dChar);
        //this.byDic = new Uint8Array(this.byDic);  // not quicker, even slower

        if (this.cStemming == "S") {
            this.funcStemming = str_transform.getStemFromSuffixCode;
        } else if (this.cStemming == "A") {
            this.funcStemming = str_transform.getStemFromAffixCode;
        } else {
            this.funcStemming = str_transform.noStemming;
        }

        // Configuring DAWG functions according to nVersion
        switch (this.nVersion) {
            case 1:
                this.morph = this._morph1;
                this.stem = this._stem1;

Old version (lines 70-105):

                break;
            default:
                throw ValueError("# Error: unknown code: " + this.nVersion);
        }
        //console.log(this.getInfo());
        this.bOptNumSigle = true;
        this.bOptNumAtLast = false;
    };


    getInfo () {
        return  `  Language: ${this.sLang}      Version: ${this.nVersion}      Stemming: ${this.cStemming}FX\n` +
                `  Arcs values:  ${this.nArcVal} = ${this.nChar} characters,  ${this.nAff} affixes,  ${this.nTag} tags\n` +
                `  Dictionary: ${this.nEntries} entries,    ${this.nNode} nodes,   ${this.nArc} arcs\n` +
                `  Address size: ${this.nBytesNodeAddress} bytes,  Arc size: ${this.nBytesArc} bytes\n`;
    };


    isValidToken (sToken) {
        // checks if sToken is valid (if there are hyphens in sToken, sToken is split and each part is checked)
        if (this.isValid(sToken)) {
            return true;
        }
        if (sToken.includes("-")) {
            if (sToken.gl_count("-") > 4) {
                return true;
            }
            return sToken.split("-").every(sWord  =>  this.isValid(sWord)); 
        }
        return false;
    };


    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (!sWord) {
            return null;
        }
        if (sWord.includes("’")) { // ugly hack

New version (lines 72-107):

                break;
            default:
                throw ValueError("# Error: unknown code: " + this.nVersion);
        }
        //console.log(this.getInfo());
        this.bOptNumSigle = true;
        this.bOptNumAtLast = false;

    }

    getInfo () {
        return  `  Language: ${this.sLang}      Version: ${this.nVersion}      Stemming: ${this.cStemming}FX\n` +
                `  Arcs values:  ${this.nArcVal} = ${this.nChar} characters,  ${this.nAff} affixes,  ${this.nTag} tags\n` +
                `  Dictionary: ${this.nEntries} entries,    ${this.nNode} nodes,   ${this.nArc} arcs\n` +
                `  Address size: ${this.nBytesNodeAddress} bytes,  Arc size: ${this.nBytesArc} bytes\n`;

    }

    isValidToken (sToken) {
        // checks if sToken is valid (if there are hyphens in sToken, sToken is split and each part is checked)
        if (this.isValid(sToken)) {
            return true;
        }
        if (sToken.includes("-")) {
            if (sToken.gl_count("-") > 4) {
                return true;
            }
            return sToken.split("-").every(sWord  =>  this.isValid(sWord)); 
        }
        return false;

    }

    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (!sWord) {
            return null;
        }
        if (sWord.includes("’")) { // ugly hack

Old version (lines 121-177):

                }
                return !!this.lookup(sWord.slice(0, 1).toLowerCase() + sWord.slice(1));
            } else {
                return !!this.lookup(sWord.toLowerCase());
            }
        }
        return false;
    };


    _convBytesToInteger (aBytes) {
        // Byte order = Big Endian (bigger first)
        let nVal = 0;
        let nWeight = (aBytes.length - 1) * 8;
        for (let n of aBytes) {
            nVal += n << nWeight;
            nWeight = nWeight - 8;
        }
        return nVal;
    };


    lookup (sWord) {
        // returns true if sWord in dictionary (strict verification)
        let iAddr = 0;
        for (let c of sWord) {
            if (!this.dChar.has(c)) {
                return false;
            }
            iAddr = this._lookupArcNode(this.dChar.get(c), iAddr);
            if (iAddr === null) {
                return false;
            }
        }
        return Boolean(this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask);
    };


    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let l = this.morph(sWord);
        if (sWord[0].gl_isUpperCase()) {
            l = l.concat(this.morph(sWord.toLowerCase()));
            if (sWord.gl_isUpperCase() && sWord.length > 1) {
                l = l.concat(this.morph(sWord.gl_toCapitalize()));
            }
        }
        return l;
    };


    // morph (sWord) {
    //     is defined in constructor
    // };
    
    // VERSION 1
    _morph1 (sWord) {
        // returns morphologies of sWord
        let iAddr = 0;
        for (let c of sWord) {
            if (!this.dChar.has(c)) {

New version (lines 123-179):

                }
                return !!this.lookup(sWord.slice(0, 1).toLowerCase() + sWord.slice(1));
            } else {
                return !!this.lookup(sWord.toLowerCase());
            }
        }
        return false;

    }

    _convBytesToInteger (aBytes) {
        // Byte order = Big Endian (bigger first)
        let nVal = 0;
        let nWeight = (aBytes.length - 1) * 8;
        for (let n of aBytes) {
            nVal += n << nWeight;
            nWeight = nWeight - 8;
        }
        return nVal;

    }

    lookup (sWord) {
        // returns true if sWord in dictionary (strict verification)
        let iAddr = 0;
        for (let c of sWord) {
            if (!this.dChar.has(c)) {
                return false;
            }
            iAddr = this._lookupArcNode(this.dChar.get(c), iAddr);
            if (iAddr === null) {
                return false;
            }
        }
        return Boolean(this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask);

    }

    getMorph (sWord) {
        // retrieves morphologies list, different casing allowed
        let l = this.morph(sWord);
        if (sWord[0].gl_isUpperCase()) {
            l = l.concat(this.morph(sWord.toLowerCase()));
            if (sWord.gl_isUpperCase() && sWord.length > 1) {
                l = l.concat(this.morph(sWord.gl_toCapitalize()));
            }
        }
        return l;

    }

    // morph (sWord) {
    //     is defined in constructor
    // }
    
    // VERSION 1
    _morph1 (sWord) {
        // returns morphologies of sWord
        let iAddr = 0;
        for (let c of sWord) {
            if (!this.dChar.has(c)) {

Old version (lines 203-217):

                    }
                }
                iAddr = iEndArcAddr + this.nBytesNodeAddress;
            }
            return l;
        }
        return [];
    };


    _stem1 (sWord) {
        // returns stems list of sWord
        let iAddr = 0;
        for (let c of sWord) {
            if (!this.dChar.has(c)) {
                return [];

New version (lines 205-219):

                    }
                }
                iAddr = iEndArcAddr + this.nBytesNodeAddress;
            }
            return l;
        }
        return [];

    }

    _stem1 (sWord) {
        // returns stems list of sWord
        let iAddr = 0;
        for (let c of sWord) {
            if (!this.dChar.has(c)) {
                return [];

Old version (lines 233-293):

                    l.push(this.funcStemming(sWord, this.lArcVal[nArc]));
                }
                iAddr = iEndArcAddr + this.nBytesNodeAddress;
            }
            return l;
        }
        return [];
    };


    _lookupArcNode1 (nVal, iAddr) {
        // checks if nVal is an arc of the node at iAddr; if so, returns the address of the next node, else null
        while (true) {
            let iEndArcAddr = iAddr+this.nBytesArc;
            let nRawArc = this._convBytesToInteger(this.byDic.slice(iAddr, iEndArcAddr));
            if (nVal == (nRawArc & this._arcMask)) {
                // the value we are looking for 
                // we return the address of the next node
                return this._convBytesToInteger(this.byDic.slice(iEndArcAddr, iEndArcAddr+this.nBytesNodeAddress));
            }
            else {
                // value not found
                if (nRawArc & this._lastArcMask) {
                    return null;
                }
                iAddr = iEndArcAddr + this.nBytesNodeAddress;
            }
        }
    };


    // VERSION 2
    _morph2 (sWord) {
        // to do
    };


    _stem2 (sWord) {
        // to do
    };


    _lookupArcNode2 (nVal, iAddr) {
        // to do
    };



    // VERSION 3
    _morph3 (sWord) {
        // to do
    };


    _stem3 (sWord) {
        // to do
    };


    _lookupArcNode3 (nVal, iAddr) {
        // to do
    };

}


if (typeof(exports) !== 'undefined') {
    exports.IBDAWG = IBDAWG;
}


New version (lines 235-295):

295
                    l.push(this.funcStemming(sWord, this.lArcVal[nArc]));
                }
                iAddr = iEndArcAddr + this.nBytesNodeAddress;
            }
            return l;
        }
        return [];

    }

    _lookupArcNode1 (nVal, iAddr) {
        // checks if nVal is an arc of the node at iAddr; if so, returns the address of the next node, else null
        while (true) {
            let iEndArcAddr = iAddr+this.nBytesArc;
            let nRawArc = this._convBytesToInteger(this.byDic.slice(iAddr, iEndArcAddr));
            if (nVal == (nRawArc & this._arcMask)) {
                // the value we are looking for 
                // we return the address of the next node
                return this._convBytesToInteger(this.byDic.slice(iEndArcAddr, iEndArcAddr+this.nBytesNodeAddress));
            }
            else {
                // value not found
                if (nRawArc & this._lastArcMask) {
                    return null;
                }
                iAddr = iEndArcAddr + this.nBytesNodeAddress;
            }
        }

    }

    // VERSION 2
    _morph2 (sWord) {
        // to do

    }

    _stem2 (sWord) {
        // to do

    }

    _lookupArcNode2 (nVal, iAddr) {
        // to do

    }


    // VERSION 3
    _morph3 (sWord) {
        // to do

    }

    _stem3 (sWord) {
        // to do

    }

    _lookupArcNode3 (nVal, iAddr) {
        // to do

    }
}


if (typeof(exports) !== 'undefined') {
    exports.IBDAWG = IBDAWG;
}

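For orientation only (not part of the diff): a hedged sketch of the revised constructor, whose new optional sPath argument can override the default "resource://grammalecte/_dictionaries/" location. The file name, path and test word below are placeholders.

    // illustrative sketch, not from the repository; file name and path are placeholders
    let oDict = new IBDAWG("fr.json", "resource://grammalecte/_dictionaries");
    if (oDict.lookup("chien")) {                // strict lookup, no case variants tried
        console.log(oDict.getMorph("chien"));   // list of morphology strings (relies on the gl_* String extensions)
    }
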
Modified gc_core/js/jsex_map.js from [985754ee9e] to [ca76af0666].


Old version (lines 1-47):

// Map

if (Map.prototype.grammalecte === undefined) {
    Map.prototype.gl_shallowCopy = function () {
        let oNewMap = new Map();
        for (let [key, val] of this.entries()) {
            oNewMap.set(key, val);
        }
        return oNewMap;
    }

    Map.prototype.gl_get = function (key, defaultValue) {
        let res = this.get(key);
        if (res !== undefined) {
            return res;
        }
        return defaultValue;
    }

    Map.prototype.gl_toString = function () {
        // Default .toString() gives nothing useful
        let sRes = "{ ";
        for (let [k, v] of this.entries()) {
            sRes += (typeof k === "string") ? '"' + k + '": ' : k.toString() + ": ";
            sRes += (typeof v === "string") ? '"' + v + '", ' : v.toString() + ", ";
        }
        sRes = sRes.slice(0, -2) + " }"
        return sRes;
    }

    Map.prototype.gl_update = function (dDict) {
        for (let [k, v] of dDict.entries()) {
            this.set(k, v);
        }
    }

    Map.prototype.gl_updateOnlyExistingKeys = function (dDict) {
        for (let [k, v] of dDict.entries()) {
            if (this.has(k)){
                this.set(k, v);
            }
        }
    }

    Map.prototype.grammalecte = true;
}


New version (lines 1-47):

// Map

if (Map.prototype.grammalecte === undefined) {
    Map.prototype.gl_shallowCopy = function () {
        let oNewMap = new Map();
        for (let [key, val] of this.entries()) {
            oNewMap.set(key, val);
        }
        return oNewMap;
    };

    Map.prototype.gl_get = function (key, defaultValue) {
        let res = this.get(key);
        if (res !== undefined) {
            return res;
        }
        return defaultValue;
    };

    Map.prototype.gl_toString = function () {
        // Default .toString() gives nothing useful
        let sRes = "{ ";
        for (let [k, v] of this.entries()) {
            sRes += (typeof k === "string") ? '"' + k + '": ' : k.toString() + ": ";
            sRes += (typeof v === "string") ? '"' + v + '", ' : v.toString() + ", ";
        }
        sRes = sRes.slice(0, -2) + " }";
        return sRes;
    };

    Map.prototype.gl_update = function (dDict) {
        for (let [k, v] of dDict.entries()) {
            this.set(k, v);
        }
    };

    Map.prototype.gl_updateOnlyExistingKeys = function (dDict) {
        for (let [k, v] of dDict.entries()) {
            if (this.has(k)){
                this.set(k, v);
            }
        }
    };

    Map.prototype.grammalecte = true;
}

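For orientation only (not part of the diff): a small sketch of the Map extensions defined above; the sample data is arbitrary.

    // illustrative sketch, not from the repository
    let d1 = new Map([["a", 1], ["b", 2]]);
    let d2 = d1.gl_shallowCopy();
    d2.gl_update(new Map([["b", 20], ["c", 30]]));   // d2: a=1, b=20, c=30
    d1.gl_updateOnlyExistingKeys(d2);                // d1: a=1, b=20 ("c" is not added)
    d1.gl_get("z", "missing");                       // "missing"
    d1.gl_toString();                                // '{ "a": 1, "b": 20 }'
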
Modified gc_core/js/jsex_regex.js from [b8b02d05dd] to [8feeee694f].


Old version (lines 38-64):

                            m.end.push(m.index + codePos + m[i].length);
                        } else if (codePos === "$") {
                            // at the end of the pattern
                            m.start.push(this.lastIndex - m[i].length);
                            m.end.push(this.lastIndex);
                        } else if (codePos === "w") {
                            // word in the middle of the pattern
                            iPos = m[0].search("[ ’,()«»“”]"+m[i]+"[ ,’()«»“”]") + 1 + m.index
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length)
                        } else if (codePos === "*") {
                            // anywhere
                            iPos = m[0].indexOf(m[i]) + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length)
                        } else if (codePos === "**") {
                            // anywhere after previous group
                            iPos = m[0].indexOf(m[i], m.end[i-1]-m.index) + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length)
                        } else if (codePos.startsWith(">")) {
                            // >x:_
                            // todo: look in substring x
                            iPos = m[0].indexOf(m[i]) + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length);
                        } else {

New version (lines 38-64):

                            m.end.push(m.index + codePos + m[i].length);
                        } else if (codePos === "$") {
                            // at the end of the pattern
                            m.start.push(this.lastIndex - m[i].length);
                            m.end.push(this.lastIndex);
                        } else if (codePos === "w") {
                            // word in the middle of the pattern
                            iPos = m[0].search("[ ’,()«»“”]"+m[i]+"[ ,’()«»“”]") + 1 + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length);
                        } else if (codePos === "*") {
                            // anywhere
                            iPos = m[0].indexOf(m[i]) + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length);
                        } else if (codePos === "**") {
                            // anywhere after previous group
                            iPos = m[0].indexOf(m[i], m.end[i-1]-m.index) + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length);
                        } else if (codePos.startsWith(">")) {
                            // >x:_
                            // todo: look in substring x
                            iPos = m[0].indexOf(m[i]) + m.index;
                            m.start.push(iPos);
                            m.end.push(iPos + m[i].length);
                        } else {

Old version (lines 79-89):

            if (typeof(helpers) !== "undefined") {
                helpers.logerror(e);
            } else {
                console.error(e);
            }
        }
        return m;
    }

    RegExp.prototype.grammalecte = true;
}

New version (lines 79-89):

            if (typeof(helpers) !== "undefined") {
                helpers.logerror(e);
            } else {
                console.error(e);
            }
        }
        return m;
    };

    RegExp.prototype.grammalecte = true;
}

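For orientation only (not part of the diff): gc_engine.js below calls this extension as zRegex.gl_exec2(s, lGroups, lNegLookBefore); on a match it returns the usual exec() array augmented with m.start[i] and m.end[i] offsets for each group, located with the position codes handled above ("$" end of pattern, "w" word inside the pattern, "*" anywhere, "**" anywhere after the previous group). A hedged fragment, with all variables assumed to be defined as in the engine:

    // illustrative fragment, not from the repository
    let m = zRegex.gl_exec2(sSentence, lGroups, lNegLookBefore);
    if (m !== null) {
        console.log(m[1], m.start[1], m.end[1]);   // text of group 1 and its absolute offsets in sSentence
    }
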
Modified gc_core/js/jsex_string.js from [1e9c89a872] to [86533aa4da].


Old version (lines 11-57):

        let iPos = 0;
        let nStep = (bOverlapping) ? 1 : sSearch.length;
        while ((iPos = this.indexOf(sSearch, iPos)) >= 0) {
            nOccur++;
            iPos += nStep;
        }
        return nOccur;
    }
    String.prototype.gl_isDigit = function () {
        return (this.search(/^[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]+$/) !== -1);
    }
    String.prototype.gl_isLowerCase = function () {
        return (this.search(/^[a-zà-öø-ÿ0-9-]+$/) !== -1);
    }
    String.prototype.gl_isUpperCase = function () {
        return (this.search(/^[A-ZÀ-ÖØ-ߌ0-9-]+$/) !== -1);
    }
    String.prototype.gl_isTitle = function () {
        return (this.search(/^[A-ZÀ-ÖØ-ߌ][a-zà-öø-ÿ'’-]+$/) !== -1);
    }
    String.prototype.gl_toCapitalize = function () {
        return this.slice(0,1).toUpperCase() + this.slice(1).toLowerCase();
    }
    String.prototype.gl_expand = function (oMatch) {
        let sNew = this;
        for (let i = 0; i < oMatch.length ; i++) {
            let z = new RegExp("\\\\"+parseInt(i), "g");
            sNew = sNew.replace(z, oMatch[i]);
        }
        return sNew;
    }
    String.prototype.gl_trimRight = function (sChars) {
        let z = new RegExp("["+sChars+"]+$");
        return this.replace(z, "");
    }
    String.prototype.gl_trimLeft = function (sChars) {
        let z = new RegExp("^["+sChars+"]+");
        return this.replace(z, "");
    }
    String.prototype.gl_trim = function (sChars) {
        let z1 = new RegExp("^["+sChars+"]+");
        let z2 = new RegExp("["+sChars+"]+$");
        return this.replace(z1, "").replace(z2, "");
    }

    String.prototype.grammalecte = true;
}


New version (lines 11-57):

        let iPos = 0;
        let nStep = (bOverlapping) ? 1 : sSearch.length;
        while ((iPos = this.indexOf(sSearch, iPos)) >= 0) {
            nOccur++;
            iPos += nStep;
        }
        return nOccur;
    };
    String.prototype.gl_isDigit = function () {
        return (this.search(/^[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]+$/) !== -1);
    };
    String.prototype.gl_isLowerCase = function () {
        return (this.search(/^[a-zà-öø-ÿ0-9-]+$/) !== -1);
    };
    String.prototype.gl_isUpperCase = function () {
        return (this.search(/^[A-ZÀ-ÖØ-ߌ0-9-]+$/) !== -1);
    };
    String.prototype.gl_isTitle = function () {
        return (this.search(/^[A-ZÀ-ÖØ-ߌ][a-zà-öø-ÿ'’-]+$/) !== -1);
    };
    String.prototype.gl_toCapitalize = function () {
        return this.slice(0,1).toUpperCase() + this.slice(1).toLowerCase();
    };
    String.prototype.gl_expand = function (oMatch) {
        let sNew = this;
        for (let i = 0; i < oMatch.length ; i++) {
            let z = new RegExp("\\\\"+parseInt(i), "g");
            sNew = sNew.replace(z, oMatch[i]);
        }
        return sNew;
    };
    String.prototype.gl_trimRight = function (sChars) {
        let z = new RegExp("["+sChars+"]+$");
        return this.replace(z, "");
    };
    String.prototype.gl_trimLeft = function (sChars) {
        let z = new RegExp("^["+sChars+"]+");
        return this.replace(z, "");
    };
    String.prototype.gl_trim = function (sChars) {
        let z1 = new RegExp("^["+sChars+"]+");
        let z2 = new RegExp("["+sChars+"]+$");
        return this.replace(z1, "").replace(z2, "");
    };

    String.prototype.grammalecte = true;
}

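For orientation only (not part of the diff): a few calls showing what the String extensions above return; the sample strings are arbitrary.

    // illustrative sketch, not from the repository
    "Grammalecte".gl_isTitle();      // true
    "éponge".gl_isLowerCase();       // true
    "2017".gl_isDigit();             // true
    "bONJOUR".gl_toCapitalize();     // "Bonjour"
    "--très-bien--".gl_trim("-");    // "très-bien"
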
Modified gc_core/js/lang_core/gc_engine.js from [ce3013cd59] to [46602d3f85].


Old version (lines 1-420):

// Grammar checker engine



${string}
${regex}
${map}

















function capitalizeArray (aArray) {
    // can’t map on user defined function??
    let aNew = [];
    for (let i = 0; i < aArray.length; i = i + 1) {
        aNew[i] = aArray[i].gl_toCapitalize();
    }
    return aNew;
}

const ibdawg = require("resource://grammalecte/ibdawg.js");
const helpers = require("resource://grammalecte/helpers.js");
const gc_options = require("resource://grammalecte/${lang}/gc_options.js");
const cr = require("resource://grammalecte/${lang}/cregex.js");
const text = require("resource://grammalecte/text.js");
const echo = require("resource://grammalecte/helpers.js").echo;

const lang = "${lang}";
const locales = ${loc};
const pkg = "${implname}";
const name = "${name}";
const version = "${version}";
const author = "${author}";

// commons regexes
const _zEndOfSentence = new RegExp ('([.?!:;…][ .?!… »”")]*|.$)', "g");
const _zBeginOfParagraph = new RegExp ("^[-  –—.,;?!…]*", "ig");
const _zEndOfParagraph = new RegExp ("[-  .,;?!…–—]*$", "ig");

// grammar rules and dictionary
//const _rules = require("./gc_rules.js");
let _sContext = "";                                 // what software is running
const _rules = require("resource://grammalecte/${lang}/gc_rules.js");
let _dOptions = null;
let _aIgnoredRules = new Set();
let _oDict = null;
let _dAnalyses = new Map();                         // cache for data from dictionary













///// Parsing

function parse (sText, sCountry="${country_default}", bDebug=false, bContext=false) {
    // analyses the paragraph sText and returns list of errors
    let dErrors;
    let errs;
    let sAlt = sText;
    let dDA = new Map();        // Disambiguator
    let dPriority = new Map();  // Key = position; value = priority
    let sNew = "";

    // parse paragraph
    try {
        [sNew, dErrors] = _proofread(sText, sAlt, 0, true, dDA, dPriority, sCountry, bDebug, bContext);
        if (sNew) {
            sText = sNew;
        }
    }
    catch (e) {
        helpers.logerror(e);
    }

    // cleanup
    if (sText.includes(" ")) {
        sText = sText.replace(/ /g, ' '); // nbsp
    }
    if (sText.includes(" ")) {
        sText = sText.replace(/ /g, ' '); // snbsp
    }
    if (sText.includes("'")) {
        sText = sText.replace(/'/g, "’");
    }
    if (sText.includes("‑")) {
        sText = sText.replace(/‑/g, "-"); // nobreakdash
    }

    // parse sentence
    for (let [iStart, iEnd] of _getSentenceBoundaries(sText)) {
        if (4 < (iEnd - iStart) < 2000) {
            dDA.clear();
            //echo(sText.slice(iStart, iEnd));
            try {
                [_, errs] = _proofread(sText.slice(iStart, iEnd), sAlt.slice(iStart, iEnd), iStart, false, dDA, dPriority, sCountry, bDebug, bContext);
                dErrors.gl_update(errs);
            }
            catch (e) {
                helpers.logerror(e);
            }
        }
    }
    return Array.from(dErrors.values());

}




function* _getSentenceBoundaries (sText) {
    let mBeginOfSentence = _zBeginOfParagraph.exec(sText)
    let iStart = _zBeginOfParagraph.lastIndex;
    let m;
    while ((m = _zEndOfSentence.exec(sText)) !== null) {
        yield [iStart, _zEndOfSentence.lastIndex];
        iStart = _zEndOfSentence.lastIndex;
    }
}

function _proofread (s, sx, nOffset, bParagraph, dDA, dPriority, sCountry, bDebug, bContext) {
    let dErrs = new Map();
    let bChange = false;
    let bIdRule = option('idrule');
    let m;
    let bCondMemo;
    let nErrorStart;

    for (let [sOption, lRuleGroup] of _getRules(bParagraph)) {
        if (!sOption || option(sOption)) {
            for (let [zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions, lGroups, lNegLookBefore] of lRuleGroup) {
                if (!_aIgnoredRules.has(sRuleId)) {
                    while ((m = zRegex.gl_exec2(s, lGroups, lNegLookBefore)) !== null) {
                        bCondMemo = null;
                        /*if (bDebug) {
                            echo(">>>> Rule # " + sLineId + " - Text: " + s + " opt: "+ sOption);
                        }*/
                        for (let [sFuncCond, cActionType, sWhat, ...eAct] of lActions) {
                        // action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
                            try {
                                //echo(oEvalFunc[sFuncCond]);
                                bCondMemo = (!sFuncCond || oEvalFunc[sFuncCond](s, sx, m, dDA, sCountry, bCondMemo))
                                if (bCondMemo) {
                                    switch (cActionType) {
                                        case "-":
                                            // grammar error
                                            //echo("-> error detected in " + sLineId + "\nzRegex: " + zRegex.source);
                                            nErrorStart = nOffset + m.start[eAct[0]];
                                            if (!dErrs.has(nErrorStart) || nPriority > dPriority.get(nErrorStart)) {
                                                dErrs.set(nErrorStart, _createError(s, sx, sWhat, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption, bContext));
                                                dPriority.set(nErrorStart, nPriority);
                                            }
                                            break;
                                        case "~":
                                            // text processor
                                            //echo("-> text processor by " + sLineId + "\nzRegex: " + zRegex.source);
                                            s = _rewrite(s, sWhat, eAct[0], m, bUppercase);
                                            bChange = true;
                                            if (bDebug) {
                                                echo("~ " + s + "  -- " + m[eAct[0]] + "  # " + sLineId);
                                            }
                                            break;
                                        case "=":
                                            // disambiguation
                                            //echo("-> disambiguation by " + sLineId + "\nzRegex: " + zRegex.source);
                                            oEvalFunc[sWhat](s, m, dDA);
                                            if (bDebug) {
                                                echo("= " + m[0] + "  # " + sLineId + "\nDA: " + dDA.gl_toString());
                                            }
                                            break;
                                        case ">":
                                            // we do nothing, this test is just a condition to apply all following actions
                                            break;
                                        default:
                                            echo("# error: unknown action at " + sLineId);
                                    }
                                } else {
                                    if (cActionType == ">") {
                                        break;
                                    }
                                }
                            }
                            catch (e) {
                                echo(s);
                                echo("# line id: " + sLineId + "\n# rule id: " + sRuleId);
                                helpers.logerror(e);

                            }
                        }
                    }
                }
            }
        }
    }
    if (bChange) {
        return [s, dErrs];
    }
    return [false, dErrs];
}

function _createError (s, sx, sRepl, nOffset, m, iGroup, sLineId, sRuleId, bUppercase, sMsg, sURL, bIdRule, sOption, bContext) {
    let oErr = {};
    oErr["nStart"] = nOffset + m.start[iGroup];
    oErr["nEnd"] = nOffset + m.end[iGroup];
    oErr["sLineId"] = sLineId;
    oErr["sRuleId"] = sRuleId;
    oErr["sType"] = (sOption) ? sOption : "notype";
    // suggestions
    if (sRepl[0] === "=") {
        let sugg = oEvalFunc[sRepl.slice(1)](s, m);
        if (sugg) {
            if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
                oErr["aSuggestions"] = capitalizeArray(sugg.split("|"));
            } else {
                oErr["aSuggestions"] = sugg.split("|");
            }
        } else {
            oErr["aSuggestions"] = [];
        }
    } else if (sRepl == "_") {
        oErr["aSuggestions"] = [];
    } else {
        if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
            oErr["aSuggestions"] = capitalizeArray(sRepl.gl_expand(m).split("|"));
        } else {
            oErr["aSuggestions"] = sRepl.gl_expand(m).split("|");
        }
    }
    // Message

    if (sMsg[0] === "=") {
        sMessage = oEvalFunc[sMsg.slice(1)](s, m)
    } else {
        sMessage = sMsg.gl_expand(m);
    }
    if (bIdRule) {
        sMessage += " ##" + sLineId + " #" + sRuleId;
    }
    oErr["sMessage"] = sMessage;
    // URL
    oErr["URL"] = sURL || "";
    // Context
    if (bContext) {
        oErr["sUnderlined"] = sx.slice(m.start[iGroup], m.end[iGroup]);
        oErr["sBefore"] = sx.slice(Math.max(0, m.start[iGroup]-80), m.start[iGroup]);
        oErr["sAfter"] = sx.slice(m.end[iGroup], m.end[iGroup]+80);
    }
    return oErr;
}

function _rewrite (s, sRepl, iGroup, m, bUppercase) {
    // text processor: write sRepl in s at iGroup position
    let ln = m.end[iGroup] - m.start[iGroup];
    let sNew = "";
    if (sRepl === "*") {
        sNew = " ".repeat(ln);
    } else if (sRepl === ">" || sRepl === "_" || sRepl === "~") {
        sNew = sRepl + " ".repeat(ln-1);
    } else if (sRepl === "@") {
        sNew = "@".repeat(ln);
    } else if (sRepl.slice(0,1) === "=") {
        sNew = oEvalFunc[sRepl.slice(1)](s, m);
        sNew = sNew + " ".repeat(ln-sNew.length);
        if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
            sNew = sNew.gl_toCapitalize();
        }
    } else {
        sNew = sRepl.gl_expand(m);
        sNew = sNew + " ".repeat(ln-sNew.length);
    }
    //echo("\n"+s+"\nstart: "+m.start[iGroup]+" end:"+m.end[iGroup])
    return s.slice(0, m.start[iGroup]) + sNew + s.slice(m.end[iGroup]);

}


function ignoreRule (sRuleId) {
    _aIgnoredRules.add(sRuleId);
}

function resetIgnoreRules () {
    _aIgnoredRules.clear();
}

function reactivateRule (sRuleId) {
    _aIgnoredRules.delete(sRuleId);
}

function listRules (sFilter=null) {
    // generator: returns tuple (sOption, sLineId, sRuleId)
    try {
        for ([sOption, lRuleGroup] of _getRules(true)) {
            for ([_, _, sLineId, sRuleId, _, _] of lRuleGroup) {
                if (!sFilter || sRuleId.test(sFilter)) {
                    yield [sOption, sLineId, sRuleId];
                }
            }
        }
        for ([sOption, lRuleGroup] of _getRules(false)) {
            for ([_, _, sLineId, sRuleId, _, _] of lRuleGroup) {
                if (!sFilter || sRuleId.test(sFilter)) {
                    yield [sOption, sLineId, sRuleId];
                }
            }
        }
    }
    catch (e) {
        helpers.logerror(e);
    }

}







//////// init

function load (sContext="JavaScript") {
    try {


        _oDict = new ibdawg.IBDAWG("${dic_name}.json");



        _sContext = sContext;
        _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
    }
    catch (e) {
        helpers.logerror(e);
    }
}

function setOption (sOpt, bVal) {
    if (_dOptions.has(sOpt)) {
        _dOptions.set(sOpt, bVal);
    }
}


function setOptions (dOpt) {
    _dOptions.gl_updateOnlyExistingKeys(dOpt);

}

function getOptions () {
    return _dOptions;
}

function getDefaultOptions () {
    return gc_options.getOptions(_sContext).gl_shallowCopy();
}

function resetOptions () {
    _dOptions = gc_options.getOptions(_sContext).gl_shallowCopy();
}

function getDictionary () {
    return _oDict;
}

function _getRules (bParagraph) {
    if (!bParagraph) {
        return _rules.lSentenceRules;
    }
    return _rules.lParagraphRules;
}



//////// common functions

function option (sOpt) {
    // return true if option sOpt is active
    return _dOptions.get(sOpt);
}

function displayInfo (dDA, aWord) {
    // for debugging: info of word
    if (!aWord) {
        echo("> nothing to find");
        return true;
    }
    if (!_dAnalyses.has(aWord[1]) && !_storeMorphFromFSA(aWord[1])) {
        echo("> not in FSA");
        return true;
    }
    if (dDA.has(aWord[0])) {
        echo("DA: " + dDA.get(aWord[0]));
    }
    echo("FSA: " + _dAnalyses.get(aWord[1]));
    return true;
}

function _storeMorphFromFSA (sWord) {
    // retrieves morphologies list from _oDict -> _dAnalyses
    //echo("register: "+sWord + " " + _oDict.getMorph(sWord).toString())
    _dAnalyses.set(sWord, _oDict.getMorph(sWord));
    return !!_dAnalyses.get(sWord);
}

function morph (dDA, aWord, sPattern, bStrict=true, bNoWord=false) {
    // analyse a tuple (position, word), return true if sPattern in morphologies (disambiguation on)
    if (!aWord) {
        //echo("morph: noword, returns " + bNoWord);
        return bNoWord;
    }
    //echo("aWord: "+aWord.toString());
    if (!_dAnalyses.has(aWord[1]) && !_storeMorphFromFSA(aWord[1])) {
        return false;
    }
    let lMorph = dDA.has(aWord[0]) ? dDA.get(aWord[0]) : _dAnalyses.get(aWord[1]);
    //echo("lMorph: "+lMorph.toString());
    if (lMorph.length === 0) {
        return false;
    }
    //echo("***");
    if (bStrict) {
        return lMorph.every(s  =>  (s.search(sPattern) !== -1));
    }
    return lMorph.some(s  =>  (s.search(sPattern) !== -1));
}

function morphex (dDA, aWord, sPattern, sNegPattern, bNoWord=false) {
    // analyse a tuple (position, word), returns true if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)
    if (!aWord) {
        //echo("morph: noword, returns " + bNoWord);
        return bNoWord;
    }
    //echo("aWord: "+aWord.toString());
    if (!_dAnalyses.has(aWord[1]) && !_storeMorphFromFSA(aWord[1])) {
        return false;
    }
    let lMorph = dDA.has(aWord[0]) ? dDA.get(aWord[0]) : _dAnalyses.get(aWord[1]);
    //echo("lMorph: "+lMorph.toString());
    if (lMorph.length === 0) {
        return false;
    }
    //echo("***");
    // check negative condition
    if (lMorph.some(s  =>  (s.search(sNegPattern) !== -1))) {
        return false;
    }
    // search sPattern
    return lMorph.some(s  =>  (s.search(sPattern) !== -1));
}


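For orientation only (not part of the diff): a minimal sketch of the module-level API defined in the old version above, before the object-based rewrite below. The sample sentence is arbitrary and the SDK require() environment is assumed.

    // illustrative sketch, not from the repository
    load("JavaScript");                              // build the IBDAWG dictionary and the default options
    setOption("idrule", true);                       // append rule identifiers to error messages
    let aErrors = parse("Une phrase d’exemple.");    // returns an array of error objects
    for (let oErr of aErrors) {
        echo(oErr.sMessage + " [" + oErr.nStart + "-" + oErr.nEnd + "] " + oErr.sRuleId);
    }
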

New version (lines 1-441):

// Grammar checker engine

"use strict";

${string}
${regex}
${map}


if (typeof(require) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");
    var gc_options = require("resource://grammalecte/${lang}/gc_options.js");
    var gc_rules = require("resource://grammalecte/${lang}/gc_rules.js");
    var cregex = require("resource://grammalecte/${lang}/cregex.js");
    var text = require("resource://grammalecte/text.js");
    var echo = helpers.echo;
}
else if (typeof(console) !== "undefined") {
    var echo = function (o) { console.log(o); return true; };
}
else {
    var echo = function () { return true; }
}

function capitalizeArray (aArray) {
    // can’t map on user defined function??
    let aNew = [];
    for (let i = 0; i < aArray.length; i = i + 1) {
        aNew[i] = aArray[i].gl_toCapitalize();
    }
    return aNew;
}














// data







let _sAppContext = "";                                  // what software is running

let _dOptions = null;
let _aIgnoredRules = new Set();
let _oDict = null;
let _dAnalyses = new Map();                             // cache for data from dictionary


var gc_engine = {

    //// Informations

    lang: "${lang}",
    locales: ${loc},
    pkg: "${implname}",
    name: "${name}",
    version: "${version}",
    author: "${author}",

    //// Parsing

    parse: function (sText, sCountry="${country_default}", bDebug=false, bContext=false) {
        // analyses the paragraph sText and returns list of errors
        let dErrors;
        let errs;
        let sAlt = sText;
        let dDA = new Map();        // Disambiguator
        let dPriority = new Map();  // Key = position; value = priority
        let sNew = "";

        // parse paragraph
        try {
            [sNew, dErrors] = this._proofread(sText, sAlt, 0, true, dDA, dPriority, sCountry, bDebug, bContext);
            if (sNew) {
                sText = sNew;
            }
        }
        catch (e) {
            helpers.logerror(e);
        }

        // cleanup
        if (sText.includes(" ")) {
            sText = sText.replace(/ /g, ' '); // nbsp
        }
        if (sText.includes(" ")) {
            sText = sText.replace(/ /g, ' '); // snbsp
        }
        if (sText.includes("'")) {
            sText = sText.replace(/'/g, "’");
        }
        if (sText.includes("‑")) {
            sText = sText.replace(/‑/g, "-"); // nobreakdash
        }

        // parse sentence
        for (let [iStart, iEnd] of this._getSentenceBoundaries(sText)) {
            if (4 < (iEnd - iStart) < 2000) {
                dDA.clear();
                //helpers.echo(sText.slice(iStart, iEnd));
                try {
                    [, errs] = this._proofread(sText.slice(iStart, iEnd), sAlt.slice(iStart, iEnd), iStart, false, dDA, dPriority, sCountry, bDebug, bContext);
                    dErrors.gl_update(errs);
                }
                catch (e) {
                    helpers.logerror(e);
                }
            }
        }
        return Array.from(dErrors.values());
    },

    _zEndOfSentence: new RegExp ('([.?!:;…][ .?!… »”")]*|.$)', "g"),
    _zBeginOfParagraph: new RegExp ("^[-  –—.,;?!…]*", "ig"),
    _zEndOfParagraph: new RegExp ("[-  .,;?!…–—]*$", "ig"),

    _getSentenceBoundaries: function* (sText) {
        let mBeginOfSentence = this._zBeginOfParagraph.exec(sText);
        let iStart = this._zBeginOfParagraph.lastIndex;
        let m;
        while ((m = this._zEndOfSentence.exec(sText)) !== null) {
            yield [iStart, this._zEndOfSentence.lastIndex];
            iStart = this._zEndOfSentence.lastIndex;
        }
    },

    _proofread: function (s, sx, nOffset, bParagraph, dDA, dPriority, sCountry, bDebug, bContext) {
        let dErrs = new Map();
        let bChange = false;
        let bIdRule = option('idrule');
        let m;
        let bCondMemo;
        let nErrorStart;

        for (let [sOption, lRuleGroup] of this._getRules(bParagraph)) {
            if (!sOption || option(sOption)) {
                for (let [zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions, lGroups, lNegLookBefore] of lRuleGroup) {
                    if (!_aIgnoredRules.has(sRuleId)) {
                        while ((m = zRegex.gl_exec2(s, lGroups, lNegLookBefore)) !== null) {
                            bCondMemo = null;
                            /*if (bDebug) {
                                helpers.echo(">>>> Rule # " + sLineId + " - Text: " + s + " opt: "+ sOption);
                            }*/
                            for (let [sFuncCond, cActionType, sWhat, ...eAct] of lActions) {
                            // action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
                                try {
                                    //helpers.echo(oEvalFunc[sFuncCond]);
                                    bCondMemo = (!sFuncCond || oEvalFunc[sFuncCond](s, sx, m, dDA, sCountry, bCondMemo));
                                    if (bCondMemo) {
                                        switch (cActionType) {
                                            case "-":
                                                // grammar error
                                                //helpers.echo("-> error detected in " + sLineId + "\nzRegex: " + zRegex.source);
                                                nErrorStart = nOffset + m.start[eAct[0]];
                                                if (!dErrs.has(nErrorStart) || nPriority > dPriority.get(nErrorStart)) {
                                                    dErrs.set(nErrorStart, this._createError(s, sx, sWhat, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption, bContext));
                                                    dPriority.set(nErrorStart, nPriority);
                                                }
                                                break;
                                            case "~":
                                                // text processor
                                                //helpers.echo("-> text processor by " + sLineId + "\nzRegex: " + zRegex.source);
                                                s = this._rewrite(s, sWhat, eAct[0], m, bUppercase);
                                                bChange = true;
                                                if (bDebug) {
                                                    helpers.echo("~ " + s + "  -- " + m[eAct[0]] + "  # " + sLineId);
                                                }
                                                break;
                                            case "=":
                                                // disambiguation
                                                //helpers.echo("-> disambiguation by " + sLineId + "\nzRegex: " + zRegex.source);
                                                oEvalFunc[sWhat](s, m, dDA);
                                                if (bDebug) {
                                                    helpers.echo("= " + m[0] + "  # " + sLineId + "\nDA: " + dDA.gl_toString());
                                                }
                                                break;
                                            case ">":
                                                // we do nothing, this test is just a condition to apply all following actions
                                                break;
                                            default:
                                                helpers.echo("# error: unknown action at " + sLineId);
                                        }
                                    } else {
                                        if (cActionType == ">") {
                                            break;
                                        }
                                    }
                                }
                                catch (e) {
                                    helpers.echo(s);
                                    helpers.echo("# line id: " + sLineId + "\n# rule id: " + sRuleId);
                                    helpers.logerror(e);
                                }
                            }
                        }
                    }
                }
            }
        }

        if (bChange) {
            return [s, dErrs];
        }
        return [false, dErrs];
    },

    _createError: function (s, sx, sRepl, nOffset, m, iGroup, sLineId, sRuleId, bUppercase, sMsg, sURL, bIdRule, sOption, bContext) {
        let oErr = {};
        oErr["nStart"] = nOffset + m.start[iGroup];
        oErr["nEnd"] = nOffset + m.end[iGroup];
        oErr["sLineId"] = sLineId;
        oErr["sRuleId"] = sRuleId;
        oErr["sType"] = (sOption) ? sOption : "notype";
        // suggestions
        if (sRepl.slice(0,1) === "=") {
            let sugg = oEvalFunc[sRepl.slice(1)](s, m);
            if (sugg) {
                if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
                    oErr["aSuggestions"] = capitalizeArray(sugg.split("|"));
                } else {
                    oErr["aSuggestions"] = sugg.split("|");
                }
            } else {
                oErr["aSuggestions"] = [];
            }
        } else if (sRepl == "_") {
            oErr["aSuggestions"] = [];
        } else {
            if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
                oErr["aSuggestions"] = capitalizeArray(sRepl.gl_expand(m).split("|"));
            } else {
                oErr["aSuggestions"] = sRepl.gl_expand(m).split("|");
            }
        }
        // Message
        let sMessage = "";
        if (sMsg.slice(0,1) === "=") {
            sMessage = oEvalFunc[sMsg.slice(1)](s, m);
        } else {
            sMessage = sMsg.gl_expand(m);
        }
        if (bIdRule) {
            sMessage += " ##" + sLineId + " #" + sRuleId;
        }
        oErr["sMessage"] = sMessage;
        // URL
        oErr["URL"] = sURL || "";
        // Context
        if (bContext) {
            oErr["sUnderlined"] = sx.slice(m.start[iGroup], m.end[iGroup]);
            oErr["sBefore"] = sx.slice(Math.max(0, m.start[iGroup]-80), m.start[iGroup]);
            oErr["sAfter"] = sx.slice(m.end[iGroup], m.end[iGroup]+80);
        }
        return oErr;
    },

    _rewrite: function (s, sRepl, iGroup, m, bUppercase) {
        // text processor: write sRepl in s at iGroup position
        let ln = m.end[iGroup] - m.start[iGroup];
        let sNew = "";
        if (sRepl === "*") {
            sNew = " ".repeat(ln);
        } else if (sRepl === ">" || sRepl === "_" || sRepl === "~") {
            sNew = sRepl + " ".repeat(ln-1);
        } else if (sRepl === "@") {
            sNew = "@".repeat(ln);
        } else if (sRepl.slice(0,1) === "=") {
            sNew = oEvalFunc[sRepl.slice(1)](s, m);
            sNew = sNew + " ".repeat(ln-sNew.length);
            if (bUppercase && m[iGroup].slice(0,1).gl_isUpperCase()) {
                sNew = sNew.gl_toCapitalize();
            }
        } else {
            sNew = sRepl.gl_expand(m);
            sNew = sNew + " ".repeat(ln-sNew.length);
        }
        //helpers.echo("\n"+s+"\nstart: "+m.start[iGroup]+" end:"+m.end[iGroup])
        return s.slice(0, m.start[iGroup]) + sNew + s.slice(m.end[iGroup]);
    },

    // Actions on rules

    ignoreRule: function (sRuleId) {
        _aIgnoredRules.add(sRuleId);
    },

    resetIgnoreRules: function () {
        _aIgnoredRules.clear();
    },

    reactivateRule: function (sRuleId) {
        _aIgnoredRules.delete(sRuleId);
    },

    listRules: function* (sFilter=null) {
        // generator: returns tuple (sOption, sLineId, sRuleId)
        try {
            for (let [sOption, lRuleGroup] of this._getRules(true)) {
                for (let [,, sLineId, sRuleId,,] of lRuleGroup) {
                    if (!sFilter || sRuleId.test(sFilter)) {
                        yield [sOption, sLineId, sRuleId];
                    }
                }
            }
            for (let [sOption, lRuleGroup] of this._getRules(false)) {
                for (let [,, sLineId, sRuleId,,] of lRuleGroup) {
                    if (!sFilter || sRuleId.test(sFilter)) {
                        yield [sOption, sLineId, sRuleId];
                    }
                }
            }
        }
        catch (e) {
            helpers.logerror(e);
        }
    },

    _getRules: function (bParagraph) {
        if (!bParagraph) {
            return gc_rules.lSentenceRules;
        }
        return gc_rules.lParagraphRules;
    },

    //// Initialization

    load: function (sContext="JavaScript", sPath="") {
        try {
            if (typeof(require) !== 'undefined') {
                var ibdawg = require("resource://grammalecte/ibdawg.js");
                _oDict = new ibdawg.IBDAWG("${dic_name}.json");
            } else {
                _oDict = new IBDAWG("${dic_name}.json", sPath);
            }
            _sAppContext = sContext;
            _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
        }
        catch (e) {
            helpers.logerror(e);
        }
    },

    getDictionary: function () {
        return _oDict;
    },

    //// Options

    setOption: function (sOpt, bVal) {
        if (_dOptions.has(sOpt)) {
            _dOptions.set(sOpt, bVal);
        }
    },

    setOptions: function (dOpt) {
        _dOptions.gl_updateOnlyExistingKeys(dOpt);
    },

    getOptions: function () {
        return _dOptions;
    },

    getDefaultOptions: function () {
        return gc_options.getOptions(_sAppContext).gl_shallowCopy();
    },

    resetOptions: function () {
        _dOptions = gc_options.getOptions(_sAppContext).gl_shallowCopy();
    }

};



//////// Common functions

function option (sOpt) {
    // return true if option sOpt is active
    return _dOptions.get(sOpt);
}

function displayInfo (dDA, aWord) {
    // for debugging: info of word
    if (!aWord) {
        helpers.echo("> nothing to find");
        return true;
    }
    if (!_dAnalyses.has(aWord[1]) && !_storeMorphFromFSA(aWord[1])) {
        helpers.echo("> not in FSA");
        return true;
    }
    if (dDA.has(aWord[0])) {
        helpers.echo("DA: " + dDA.get(aWord[0]));
    }
    helpers.echo("FSA: " + _dAnalyses.get(aWord[1]));
    return true;
}

function _storeMorphFromFSA (sWord) {
    // retrieves morphologies list from _oDict -> _dAnalyses
    //helpers.echo("register: "+sWord + " " + _oDict.getMorph(sWord).toString())
    _dAnalyses.set(sWord, _oDict.getMorph(sWord));
    return !!_dAnalyses.get(sWord);
}

function morph (dDA, aWord, sPattern, bStrict=true, bNoWord=false) {
    // analyse a tuple (position, word), return true if sPattern in morphologies (disambiguation on)
    if (!aWord) {
        //helpers.echo("morph: noword, returns " + bNoWord);
        return bNoWord;
    }
    //helpers.echo("aWord: "+aWord.toString());
    if (!_dAnalyses.has(aWord[1]) && !_storeMorphFromFSA(aWord[1])) {
        return false;
    }
    let lMorph = dDA.has(aWord[0]) ? dDA.get(aWord[0]) : _dAnalyses.get(aWord[1]);
    //helpers.echo("lMorph: "+lMorph.toString());
    if (lMorph.length === 0) {
        return false;
    }
    //helpers.echo("***");
    if (bStrict) {
        return lMorph.every(s  =>  (s.search(sPattern) !== -1));
    }
    return lMorph.some(s  =>  (s.search(sPattern) !== -1));
}

function morphex (dDA, aWord, sPattern, sNegPattern, bNoWord=false) {
    // analyse a tuple (position, word), returns true if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)
    if (!aWord) {
        //helpers.echo("morph: noword, returns " + bNoWord);
        return bNoWord;
    }
    //helpers.echo("aWord: "+aWord.toString());
    if (!_dAnalyses.has(aWord[1]) && !_storeMorphFromFSA(aWord[1])) {
        return false;
    }
    let lMorph = dDA.has(aWord[0]) ? dDA.get(aWord[0]) : _dAnalyses.get(aWord[1]);
    //helpers.echo("lMorph: "+lMorph.toString());
    if (lMorph.length === 0) {
        return false;
    }
    //helpers.echo("***");
    // check negative condition
    if (lMorph.some(s  =>  (s.search(sNegPattern) !== -1))) {
        return false;
    }
    // search sPattern
    return lMorph.some(s  =>  (s.search(sPattern) !== -1));
}
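
// Read together, morph() and morphex() above reduce to set tests over the morphology strings
// cached in _dAnalyses: morph() requires sPattern to match every analysis (bStrict) or at least
// one of them, while morphex() additionally rejects the word as soon as any analysis matches
// sNegPattern. A minimal standalone sketch of that logic, with an invented analysis list
// (real strings come from the dictionary):

const lMorphExample = [">avoir :V0a:Ip:3s", ">avoir :N:m:s"];          // invented analyses

const bAllVerb  = lMorphExample.every(s => s.search(":V") !== -1);     // strict mode of morph()
const bSomeVerb = lMorphExample.some(s => s.search(":V") !== -1);      // lax mode of morph()
// morphex(): no analysis may match the negative pattern, at least one must match the positive one
const bVerbNotNoun = !lMorphExample.some(s => s.search(":N") !== -1)
                     && lMorphExample.some(s => s.search(":V") !== -1);

console.log(bAllVerb, bSomeVerb, bVerbNotNoun);                        // false true false
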
    // returns a list of sWord's stems
    if (!sWord) {
        return [];
    }
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return [];
    }
    return [ for (s of _dAnalyses.get(sWord))  s.slice(1, s.indexOf(" ")) ];
}


//// functions to get text outside pattern scope

// warning: check compile_rules.py to understand how it works

function nextword (s, iStart, n) {
    // get the nth word of the input string or empty string
    let z = new RegExp("^( +[a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+){" + (n-1).toString() + "} +([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+)", "i");
    let m = z.exec(s.slice(iStart));
    if (!m) {
        return null;
    }
    return [iStart + RegExp.lastIndex - m[2].length, m[2]];
}

function prevword (s, iEnd, n) {
    // get the (-)nth word of the input string or empty string
    let z = new RegExp("([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+) +([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+ +){" + (n-1).toString() + "}$", "i");
    let m = z.exec(s.slice(0, iEnd));
    if (!m) {
        return null;
    }
    return [m.index, m[1]];
}

const _zNextWord = new RegExp ("^ +([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_][a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_-]*)", "i");
const _zPrevWord = new RegExp ("([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_][a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_-]*) +$", "i");

function nextword1 (s, iStart) {
    // get next word (optimization)

    let m = _zNextWord.exec(s.slice(iStart));
    if (!m) {
        return null;
    }
    return [iStart + RegExp.lastIndex - m[1].length, m[1]];
}



function prevword1 (s, iEnd) {
    // get previous word (optimization)
    //echo("prev1, s:"+s);
    //echo("prev1, s.slice(0, iEnd):"+s.slice(0, iEnd));
    let m = _zPrevWord.exec(s.slice(0, iEnd));
    //echo("prev1, m:"+m);
    if (!m) {
        return null;
    }
    //echo("prev1: " + m.index + " " + m[1]);
    return [m.index, m[1]];
}

function look (s, zPattern, zNegPattern=null) {
    // seek zPattern in s (before/after/fulltext), if antipattern zNegPattern not in s
    try {
        if (zNegPattern && zNegPattern.test(s)) {







    // returns a list of sWord's stems
    if (!sWord) {
        return [];
    }
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return [];
    }
    return _dAnalyses.get(sWord).map( s => s.slice(1, s.indexOf(" ")) );
}


//// functions to get text outside pattern scope

// warning: check compile_rules.py to understand how it works

function nextword (s, iStart, n) {
    // get the nth word of the input string or empty string
    let z = new RegExp("^(?: +[a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+){" + (n-1).toString() + "} +([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+)", "ig");
    let m = z.exec(s.slice(iStart));
    if (!m) {
        return null;
    }
    return [iStart + z.lastIndex - m[1].length, m[1]];
}

function prevword (s, iEnd, n) {
    // get the (-)nth word of the input string or empty string
    let z = new RegExp("([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+) +(?:[a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st%_-]+ +){" + (n-1).toString() + "}$", "i");
    let m = z.exec(s.slice(0, iEnd));
    if (!m) {
        return null;
    }
    return [m.index, m[1]];
}




function nextword1 (s, iStart) {
    // get next word (optimization)
    let _zNextWord = new RegExp ("^ +([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_][a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_-]*)", "ig");
    let m = _zNextWord.exec(s.slice(iStart));
    if (!m) {
        return null;
    }
    return [iStart + _zNextWord.lastIndex - m[1].length, m[1]];
}

const _zPrevWord = new RegExp ("([a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_][a-zà-öA-Zø-ÿÀ-Ö0-9Ø-ßĀ-ʯfi-st_-]*) +$", "i");

function prevword1 (s, iEnd) {
    // get previous word (optimization)
    let m = _zPrevWord.exec(s.slice(0, iEnd));
    if (!m) {
        return null;
    }
    return [m.index, m[1]];
}

function look (s, zPattern, zNegPattern=null) {
    // seek zPattern in s (before/after/fulltext), if antipattern zNegPattern not in s
    try {
        if (zNegPattern && zNegPattern.test(s)) {
    }
    if (dDA.has(nPos)) {
        return true;
    }
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return true;
    }
    //echo("morph: "+_dAnalyses.get(sWord).toString());
    if (_dAnalyses.get(sWord).length === 1) {
        return true;
    }
    let lSelect = [ for (sMorph of _dAnalyses.get(sWord))  if (sMorph.search(sPattern) !== -1)  sMorph ];
    //echo("lSelect: "+lSelect.toString());
    if (lSelect.length > 0) {
        if (lSelect.length != _dAnalyses.get(sWord).length) {
            dDA.set(nPos, lSelect);
        }
    } else if (lDefault) {
        dDA.set(nPos, lDefault);
    }







    }
    if (dDA.has(nPos)) {
        return true;
    }
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return true;
    }

    if (_dAnalyses.get(sWord).length === 1) {
        return true;
    }
    let lSelect = _dAnalyses.get(sWord).filter( sMorph => sMorph.search(sPattern) !== -1 );

    if (lSelect.length > 0) {
        if (lSelect.length != _dAnalyses.get(sWord).length) {
            dDA.set(nPos, lSelect);
        }
    } else if (lDefault) {
        dDA.set(nPos, lDefault);
    }
    }
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return true;
    }
    if (_dAnalyses.get(sWord).length === 1) {
        return true;
    }
    let lSelect = [ for (sMorph of _dAnalyses.get(sWord))  if (sMorph.search(sPattern) === -1)  sMorph ];
    //echo("lSelect: "+lSelect.toString());
    if (lSelect.length > 0) {
        if (lSelect.length != _dAnalyses.get(sWord).length) {
            dDA.set(nPos, lSelect);
        }
    } else if (lDefault) {
        dDA.set(nPos, lDefault);
    }
    return true;
}

function define (dDA, nPos, lMorph) {
    dDA.set(nPos, lMorph);
    return true;
}


//////// GRAMMAR CHECKER PLUGINS

${pluginsJS}


${callablesJS}



if (typeof(exports) !== 'undefined') {
    exports.load = load;

    exports.parse = parse;
    exports.lang = lang;
    exports.version = version;
    exports.getDictionary = getDictionary;
    exports.setOption = setOption;
    exports.setOptions = setOptions;
    exports.getOptions = getOptions;
    exports.getDefaultOptions = getDefaultOptions;
    exports.resetOptions = resetOptions;
    exports.ignoreRule = ignoreRule;
    exports.reactivateRule = reactivateRule;
    exports.resetIgnoreRules = resetIgnoreRules;
    exports.listRules = listRules;
}







    }
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return true;
    }
    if (_dAnalyses.get(sWord).length === 1) {
        return true;
    }
    let lSelect = _dAnalyses.get(sWord).filter( sMorph => sMorph.search(sPattern) === -1 );

    if (lSelect.length > 0) {
        if (lSelect.length != _dAnalyses.get(sWord).length) {
            dDA.set(nPos, lSelect);
        }
    } else if (lDefault) {
        dDA.set(nPos, lDefault);
    }
    return true;
}

function define (dDA, nPos, lMorph) {
    dDA.set(nPos, lMorph);
    return true;
}


//////// GRAMMAR CHECKER PLUGINS

${pluginsJS}


${callablesJS}



if (typeof(exports) !== 'undefined') {
    exports.lang = gc_engine.lang;
    exports.locales = gc_engine.locales;
    exports.pkg = gc_engine.pkg;
    exports.name = gc_engine.name;
    exports.version = gc_engine.version;
    exports.author = gc_engine.author;
    exports.parse = gc_engine.parse;
    exports._zEndOfSentence = gc_engine._zEndOfSentence;
    exports._zBeginOfParagraph = gc_engine._zBeginOfParagraph;
    exports._zEndOfParagraph = gc_engine._zEndOfParagraph;
    exports._getSentenceBoundaries = gc_engine._getSentenceBoundaries;
    exports._proofread = gc_engine._proofread;
    exports._createError = gc_engine._createError;
    exports._rewrite = gc_engine._rewrite;
    exports.ignoreRule = gc_engine.ignoreRule;
    exports.resetIgnoreRules = gc_engine.resetIgnoreRules;
    exports.reactivateRule = gc_engine.reactivateRule;
    exports.listRules = gc_engine.listRules;
    exports._getRules = gc_engine._getRules;
    exports.load = gc_engine.load;
    exports.getDictionary = gc_engine.getDictionary;
    exports.setOption = gc_engine.setOption;
    exports.setOptions = gc_engine.setOptions;
    exports.getOptions = gc_engine.getOptions;
    exports.getDefaultOptions = gc_engine.getDefaultOptions;
    exports.resetOptions = gc_engine.resetOptions;
}
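
A minimal sketch of how the API exported above might be driven from Node or a worker, assuming the built files are reachable through require(); the resource paths, the option name "nbsp" and the sample sentence are placeholders, while the error fields read here (nStart, nEnd, sMessage, aSuggestions) are the ones assembled by _createError():

// Hypothetical paths: the real resource URLs depend on how the package is built.
const gce = require("resource://grammalecte/fr/gc_engine.js");

gce.load("JavaScript");                       // select the "JavaScript" option profile
gce.setOption("nbsp", false);                 // "nbsp" is an assumed option name
for (let oErr of gce.parse("Quant est-ce qu’il arrive ?", "FR")) {
    console.log(oErr.nStart + "-" + oErr.nEnd + ": " + oErr.sMessage);
    if (oErr.aSuggestions.length > 0) {
        console.log("  > " + oErr.aSuggestions.join(" | "));
    }
}
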

Modified gc_core/js/lang_core/gc_options.js from [6e45d077d4] to [ba36100a98].

// Options for Grammalecte

${map}



function getOptions (sContext="JavaScript") {
    if (dOpt.hasOwnProperty(sContext)) {
        return dOpt[sContext];
    }
    return dOpt["JavaScript"];
}

const lStructOpt = ${lStructOpt};

const dOpt = {
    "JavaScript": new Map (${dOptJavaScript}),
    "Firefox": new Map (${dOptFirefox}),
    "Thunderbird": new Map (${dOptThunderbird}),
}

const dOptLabel = ${dOptLabel};



if (typeof(exports) !== 'undefined') {
	exports.getOptions = getOptions;
	exports.lStructOpt = lStructOpt;

	exports.dOptLabel = dOptLabel;
}




// Options for Grammalecte

${map}


var gc_options = {
    getOptions: function (sContext="JavaScript") {
        if (this.dOpt.hasOwnProperty(sContext)) {
            return this.dOpt[sContext];
        }
        return this.dOpt["JavaScript"];
    },

    lStructOpt: ${lStructOpt},

    dOpt: {
        "JavaScript": new Map (${dOptJavaScript}),
        "Firefox": new Map (${dOptFirefox}),
        "Thunderbird": new Map (${dOptThunderbird}),
    },

    dOptLabel: ${dOptLabel}
}


if (typeof(exports) !== 'undefined') {
	exports.getOptions = gc_options.getOptions;
	exports.lStructOpt = gc_options.lStructOpt;
    exports.dOpt = gc_options.dOpt;
	exports.dOptLabel = gc_options.dOptLabel;
}

Modified gc_core/js/lang_core/gc_rules.js from [03bc540fb7] to [02fc1d6f94].

// Grammar checker rules
"use strict";

${string}
${regex}


const lParagraphRules = ${paragraph_rules_JS};

const lSentenceRules = ${sentence_rules_JS};



if (typeof(exports) !== 'undefined') {
	exports.lParagraphRules = lParagraphRules;
	exports.lSentenceRules = lSentenceRules;
}






// Grammar checker rules
"use strict";

${string}
${regex}

var gc_rules = {
    lParagraphRules: ${paragraph_rules_JS},

    lSentenceRules: ${sentence_rules_JS}
}


if (typeof(exports) !== 'undefined') {
    exports.lParagraphRules = gc_rules.lParagraphRules;
    exports.lSentenceRules = gc_rules.lSentenceRules;
}
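
For reference, _proofread() in gc_engine.js destructures every compiled rule as [zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions, lGroups, lNegLookBefore], grouped under an option name. The hand-written stand-in below only makes that shape visible; real entries are generated into ${paragraph_rules_JS} and ${sentence_rules_JS} by the build, and the option name, ids and placeholder values are invented:

// Invented example of the tuple layout consumed by gc_engine._proofread().
var gc_rules_example = {
    lParagraphRules: [
        ["typo",                                  // sOption gating the whole group
            [
                [ /\b(\w+) +\1\b/ig,              // zRegex
                  false,                          // bUppercase
                  "#42", "example_rule",          // sLineId, sRuleId
                  4,                              // nPriority
                  [ [null, "-", "_", 1, "Doubled word (example message).", ""] ],  // lActions
                  null, null ]                    // lGroups, lNegLookBefore (placeholders)
            ]
        ]
    ],
    lSentenceRules: []
};
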

Modified gc_core/js/str_transform.js from [0fafeda9a5] to [3f33d76266].

//// STRING TRANSFORMATION

var dSimilarChars = new Map ([
    ["a", "aàâáä"],
    ["à", "aàâáä"],
    ["â", "aàâáä"],
    ["á", "aàâáä"],
    ["ä", "aàâáä"],
    ["c", "cç"],
    ["ç", "cç"],
    ["e", "eéêèë"],
    ["é", "eéêèë"],
    ["ê", "eéêèë"],
    ["è", "eéêèë"],
    ["ë", "eéêèë"],
    ["i", "iîïíì"],
    ["î", "iîïíì"],
    ["ï", "iîïíì"],
    ["í", "iîïíì"],
    ["ì", "iîïíì"],
    ["o", "oôóòö"],
    ["ô", "oôóòö"],
    ["ó", "oôóòö"],
    ["ò", "oôóòö"],
    ["ö", "oôóòö"],
    ["u", "uûùüú"],
    ["û", "uûùüú"],
    ["ù", "uûùüú"],
    ["ü", "uûùüú"],
    ["ú", "uûùüú"]
]);

// Note: 48 is the ASCII code for "0"



// Suffix only
function getStemFromSuffixCode (sFlex, sSfxCode) {
    if (sSfxCode == "0") {
        return sFlex;
    }
    return sSfxCode[0] == '0' ? sFlex + sSfxCode.slice(1) : sFlex.slice(0, -(sSfxCode.charCodeAt(0)-48)) + sSfxCode.slice(1);
}


// Prefix and suffix
function getStemFromAffixCode (sFlex, sAffCode) {
    if (sAffCode == "0") {
        return sFlex;
    }
    if (!sAffCode.includes("/")) {
        return "# error #";
    }
    var [sPfxCode, sSfxCode] = sAffCode.split('/');
    sFlex = sPfxCode.slice(1) + sFlex.slice(sPfxCode.charCodeAt(0)-48);
    return sSfxCode[0] == '0' ? sFlex + sSfxCode.slice(1) : sFlex.slice(0, -(sSfxCode.charCodeAt(0)-48)) + sSfxCode.slice(1);
}



if (typeof(exports) !== 'undefined') {
    exports.getStemFromSuffixCode = getStemFromSuffixCode;
    exports.getStemFromAffixCode = getStemFromAffixCode;
}


//// STRING TRANSFORMATION

// Note: 48 is the ASCII code for "0"

var str_transform = {
    getStemFromSuffixCode: function (sFlex, sSfxCode) {
        // Suffix only

        if (sSfxCode == "0") {
            return sFlex;
        }
        return sSfxCode[0] == '0' ? sFlex + sSfxCode.slice(1) : sFlex.slice(0, -(sSfxCode.charCodeAt(0)-48)) + sSfxCode.slice(1);
    },
    
    getStemFromAffixCode: function (sFlex, sAffCode) {
        // Prefix and suffix

        if (sAffCode == "0") {
            return sFlex;
        }
        if (!sAffCode.includes("/")) {
            return "# error #";
        }
        let [sPfxCode, sSfxCode] = sAffCode.split('/');
        sFlex = sPfxCode.slice(1) + sFlex.slice(sPfxCode.charCodeAt(0)-48);
        return sSfxCode[0] == '0' ? sFlex + sSfxCode.slice(1) : sFlex.slice(0, -(sSfxCode.charCodeAt(0)-48)) + sSfxCode.slice(1);
    }
};


if (typeof(exports) !== 'undefined') {
    exports.getStemFromSuffixCode = str_transform.getStemFromSuffixCode;
    exports.getStemFromAffixCode = str_transform.getStemFromAffixCode;
}
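
The suffix codes decode as follows: the first character, read as a digit (char code minus 48), gives the number of trailing characters to strip, and the rest of the code is appended; an affix code adds a prefix part before the "/". A self-contained check of that behaviour, with invented word/code pairs:

// Same decoding as str_transform.getStemFromSuffixCode(), repeated standalone for the example.
function stemFromSuffixCode (sFlex, sSfxCode) {
    if (sSfxCode === "0") {
        return sFlex;
    }
    return sSfxCode[0] === "0" ? sFlex + sSfxCode.slice(1)
                               : sFlex.slice(0, -(sSfxCode.charCodeAt(0) - 48)) + sSfxCode.slice(1);
}

console.log(stemFromSuffixCode("chevaux", "3al"));   // "cheval": strip 3 chars, append "al"
console.log(stemFromSuffixCode("parle", "0r"));      // "parler": strip nothing, append "r"
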

Modified gc_core/js/tests.js from [f2f737b523] to [abe05a3485].

// JavaScript

"use strict";



const helpers = require("resource://grammalecte/helpers.js");



class TestGrammarChecking {

    constructor (gce) {
        this.gce = gce;

        this._aRuleTested = new Set();
    };


    * testParse (bDebug=false) {
        const t0 = Date.now();

        const aData = JSON.parse(helpers.loadFile("resource://grammalecte/"+this.gce.lang+"/tests_data.json")).aData;
        //const aData = require("resource://grammalecte/"+this.gce.lang+"/tests_data.js").aData;
        let nInvalid = 0
        let nTotal = 0
        let sErrorText;
        let sSugg;
        let sExpectedErrors;
        let sTextToCheck;
        let sFoundErrors;
        let sListErr;
        let sLineNum;





// JavaScript

"use strict";


if (typeof(require) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");
}


class TestGrammarChecking {

    constructor (gce, spfTests="") {
        this.gce = gce;
        this.spfTests = spfTests;
        this._aRuleTested = new Set();

    }

    * testParse (bDebug=false) {
        const t0 = Date.now();
        let sURL = (this.spfTests !== "") ? this.spfTests : "resource://grammalecte/"+this.gce.lang+"/tests_data.json";
        const aData = JSON.parse(helpers.loadFile(sURL)).aData;

        let nInvalid = 0;
        let nTotal = 0;
        let sErrorText;
        let sSugg;
        let sExpectedErrors;
        let sTextToCheck;
        let sFoundErrors;
        let sListErr;
        let sLineNum;
                    if (sExpectedErrors !== sFoundErrors) {
                        yield "\n" + i.toString() +
                              "\n# Line num: " + sLineNum +
                              "\n> to check: " + sTextToCheck +
                              "\n  expected: " + sExpectedErrors +
                              "\n  found:    " + sFoundErrors +
                              "\n  errors:   \n" + sListErr;
                        nInvalid = nInvalid + 1
                    }
                    nTotal = nTotal + 1;
                }
                i = i + 1;
                if (i % 1000 === 0) {
                    yield i.toString();
                }
            }
            bShowUntested = true;
        }
        catch (e) {
            helpers.logerror(e);
        }

        if (bShowUntested) {
            i = 0;
            for (let [sOpt, sLineId, sRuleId] of gce.listRules()) {
                if (!this._aRuleTested.has(sLineId) && !/^[0-9]+[sp]$|^[pd]_/.test(sRuleId)) {
                    sUntestedRules += sRuleId + ", ";
                    i += 1;
                }
            }
            if (i > 0) {
                yield sUntestedRules + "\n[" + i.toString() + " untested rules]";
            }
        }

        const t1 = Date.now();
        yield "Tests parse finished in " + ((t1-t0)/1000).toString()
            + " s\nTotal errors: " + nInvalid.toString() + " / " + nTotal.toString();
    };


    _getExpectedErrors (sLine) {
        try {
            let sRes = " ".repeat(sLine.length);
            let z = /\{\{.+?\}\}/g;
            let m;
            let i = 0;







                    if (sExpectedErrors !== sFoundErrors) {
                        yield "\n" + i.toString() +
                              "\n# Line num: " + sLineNum +
                              "\n> to check: " + sTextToCheck +
                              "\n  expected: " + sExpectedErrors +
                              "\n  found:    " + sFoundErrors +
                              "\n  errors:   \n" + sListErr;
                        nInvalid = nInvalid + 1;
                    }
                    nTotal = nTotal + 1;
                }
                i = i + 1;
                if (i % 1000 === 0) {
                    yield i.toString();
                }
            }
            bShowUntested = true;
        }
        catch (e) {
            helpers.logerror(e);
        }

        if (bShowUntested) {
            i = 0;
            for (let [sOpt, sLineId, sRuleId] of this.gce.listRules()) {
                if (!this._aRuleTested.has(sLineId) && !/^[0-9]+[sp]$|^[pd]_/.test(sRuleId)) {
                    sUntestedRules += sRuleId + ", ";
                    i += 1;
                }
            }
            if (i > 0) {
                yield sUntestedRules + "\n[" + i.toString() + " untested rules]";
            }
        }

        const t1 = Date.now();
        yield "Tests parse finished in " + ((t1-t0)/1000).toString()
            + " s\nTotal errors: " + nInvalid.toString() + " / " + nTotal.toString();

    }

    _getExpectedErrors (sLine) {
        try {
            let sRes = " ".repeat(sLine.length);
            let z = /\{\{.+?\}\}/g;
            let m;
            let i = 0;
            }
            return sRes;
        }
        catch (e) {
            helpers.logerror(e);
        }
        return " ".repeat(sLine.length);
    };


    _getFoundErrors (sLine, bDebug, sOption) {
        try {
            let aErrs = [];
            if (sOption) {
                gce.setOption(sOption, true);
                aErrs = this.gce.parse(sLine, "FR", bDebug);
                gce.setOption(sOption, false);
            } else {
                aErrs = this.gce.parse(sLine, "FR", bDebug);
            }
            let sRes = " ".repeat(sLine.length);
            let sListErr = "";
            for (let dErr of aErrs) {
                sRes = sRes.slice(0, dErr["nStart"]) + "~".repeat(dErr["nEnd"] - dErr["nStart"]) + sRes.slice(dErr["nEnd"]);
                sListErr += "    * {" + dErr['sLineId'] + " / " + dErr['sRuleId'] + "}  at  " + dErr['nStart'] + ":" + dErr['nEnd'] + "\n";
                this._aRuleTested.add(dErr["sLineId"]);
            }
            return [sRes, sListErr];
        }
        catch (e) {
            helpers.logerror(e);
        }
        return [" ".repeat(sLine.length), ""];
    };


}


if (typeof(exports) !== 'undefined') {
    exports.TestGrammarChecking = TestGrammarChecking;
}







            }
            return sRes;
        }
        catch (e) {
            helpers.logerror(e);
        }
        return " ".repeat(sLine.length);

    }

    _getFoundErrors (sLine, bDebug, sOption) {
        try {
            let aErrs = [];
            if (sOption) {
                this.gce.setOption(sOption, true);
                aErrs = this.gce.parse(sLine, "FR", bDebug);
                this.gce.setOption(sOption, false);
            } else {
                aErrs = this.gce.parse(sLine, "FR", bDebug);
            }
            let sRes = " ".repeat(sLine.length);
            let sListErr = "";
            for (let dErr of aErrs) {
                sRes = sRes.slice(0, dErr["nStart"]) + "~".repeat(dErr["nEnd"] - dErr["nStart"]) + sRes.slice(dErr["nEnd"]);
                sListErr += "    * {" + dErr['sLineId'] + " / " + dErr['sRuleId'] + "}  at  " + dErr['nStart'] + ":" + dErr['nEnd'] + "\n";
                this._aRuleTested.add(dErr["sLineId"]);
            }
            return [sRes, sListErr];
        }
        catch (e) {
            helpers.logerror(e);
        }
        return [" ".repeat(sLine.length), ""];

    }

}


if (typeof(exports) !== 'undefined') {
    exports.TestGrammarChecking = TestGrammarChecking;
}
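
TestGrammarChecking.testParse() is a generator that yields progress counters and per-line failure reports, so a runner only has to iterate it. A minimal sketch, assuming the built engine and a tests_data.json file are reachable through require() (both paths are placeholders):

// Placeholder resource paths; the real ones depend on the build.
const gce = require("resource://grammalecte/fr/gc_engine.js");
const tests = require("resource://grammalecte/tests.js");

gce.load("JavaScript");
const oTest = new tests.TestGrammarChecking(gce, "resource://grammalecte/fr/tests_data.json");
for (let sMsg of oTest.testParse(false)) {
    console.log(sMsg);
}
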

Modified gc_core/js/text.js from [beffb97d58] to [46a1749c2b].

// JavaScript

"use strict";



const helpers = require("resource://grammalecte/helpers.js");




function* getParagraph (sText) {
    // generator: returns paragraphs of text
    let iStart = 0;
    let iEnd = 0;
    sText = sText.replace("\r", "");
    while ((iEnd = sText.indexOf("\n", iStart)) !== -1) {
        yield sText.slice(iStart, iEnd);
        iStart = iEnd + 1;
    }
    yield sText.slice(iStart);
}

function* wrap (sText, nWidth=80) {
    // generator: returns text line by line
    while (sText) {
        if (sText.length >= nWidth) {
            let nEnd = sText.lastIndexOf(" ", nWidth) + 1;
            if (nEnd > 0) {
                yield sText.slice(0, nEnd);
                sText = sText.slice(nEnd);
            } else {
                yield sText.slice(0, nWidth);
                sText = sText.slice(nWidth);
            }
        } else {
            break;
        }
    }
    yield sText;
}

function getReadableError (oErr) {
    // Returns an error oErr as a readable error
    try {
        let sResult = "\n* " + oErr['nStart'] + ":" + oErr['nEnd'] 
                    + "  # " + oErr['sLineId'] + "  # " + oErr['sRuleId'] + ":\n";
        sResult += "  " + oErr["sMessage"];
        if (oErr["aSuggestions"].length > 0) {
            sResult += "\n  > Suggestions : " + oErr["aSuggestions"].join(" | ");
        }
        if (oErr["URL"] !== "") {
            sResult += "\n  > URL: " + oErr["URL"];
        }
        return sResult;
    }
    catch (e) {
        helpers.logerror(e);
        return "\n# Error. Data: " + oErr.toString();
    }
}



if (typeof(exports) !== 'undefined') {
    exports.getParagraph = getParagraph;
    exports.wrap = wrap;
    exports.getReadableError = getReadableError;
}




// JavaScript

"use strict";


if (typeof(exports) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");
}


var text = {
    getParagraph: function* (sText) {
        // generator: returns paragraphs of text
        let iStart = 0;
        let iEnd = 0;
        sText = sText.replace("\r", "");
        while ((iEnd = sText.indexOf("\n", iStart)) !== -1) {
            yield sText.slice(iStart, iEnd);
            iStart = iEnd + 1;
        }
        yield sText.slice(iStart);
    },

    wrap: function* (sText, nWidth=80) {
        // generator: returns text line by line
        while (sText) {
            if (sText.length >= nWidth) {
                let nEnd = sText.lastIndexOf(" ", nWidth) + 1;
                if (nEnd > 0) {
                    yield sText.slice(0, nEnd);
                    sText = sText.slice(nEnd);
                } else {
                    yield sText.slice(0, nWidth);
                    sText = sText.slice(nWidth);
                }
            } else {
                break;
            }
        }
        yield sText;
    },

    getReadableError: function (oErr) {
        // Returns an error oErr as a readable error
        try {
            let sResult = "\n* " + oErr['nStart'] + ":" + oErr['nEnd'] 
                        + "  # " + oErr['sLineId'] + "  # " + oErr['sRuleId'] + ":\n";
            sResult += "  " + oErr["sMessage"];
            if (oErr["aSuggestions"].length > 0) {
                sResult += "\n  > Suggestions : " + oErr["aSuggestions"].join(" | ");
            }
            if (oErr["URL"] !== "") {
                sResult += "\n  > URL: " + oErr["URL"];
            }
            return sResult;
        }
        catch (e) {
            helpers.logerror(e);
            return "\n# Error. Data: " + oErr.toString();
        }
    }
};


if (typeof(exports) !== 'undefined') {
    exports.getParagraph = text.getParagraph;
    exports.wrap = text.wrap;
    exports.getReadableError = text.getReadableError;
}
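
text.getParagraph() and text.wrap() are generators, and text.getReadableError() formats one error object as produced by gc_engine.parse(). A small usage sketch, assuming the text object above is in scope (the paragraph and the sample error are invented):

let sParagraph = "Une phrase assez longue pour être coupée en plusieurs lignes de quarante caractères.";
for (let sLine of text.wrap(sParagraph, 40)) {
    console.log("| " + sLine);
}

// invented sample error carrying the fields getReadableError() reads
let oErr = { nStart: 4, nEnd: 10, sLineId: "#1", sRuleId: "demo",
             sMessage: "Exemple.", aSuggestions: [], URL: "" };
console.log(text.getReadableError(oErr));
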

Modified gc_core/js/tokenizer.js from [a6594366c3] to [fcd058bf6a].

// JavaScript
// Very simple tokenizer

"use strict";



const helpers = require("resource://grammalecte/helpers.js");



const aPatterns = {
    // All regexps must start with ^.
    "default":
        [
            [/^[   \t]+/, 'SPACE'],
            [/^[,.;:!?…«»“”‘’"(){}\[\]/·–—]+/, 'SEPARATOR'],
            [/^(?:https?:\/\/|www[.]|[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_]+[@.][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_]+[@.])[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_.\/?&!%=+*"'@$#-]+/, 'LINK'],
            [/^[#@][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_-]+/, 'TAG'],





// JavaScript
// Very simple tokenizer

"use strict";


if (typeof(exports) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");
}


const aTkzPatterns = {
    // All regexps must start with ^.
    "default":
        [
            [/^[   \t]+/, 'SPACE'],
            [/^[,.;:!?…«»“”‘’"(){}\[\]/·–—]+/, 'SEPARATOR'],
            [/^(?:https?:\/\/|www[.]|[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_]+[@.][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_]+[@.])[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_.\/?&!%=+*"'@$#-]+/, 'LINK'],
            [/^[#@][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_-]+/, 'TAG'],
            [/^&\w+;(?:\w+;|)/, 'HTMLENTITY'],
            [/^(?:l|d|n|m|t|s|j|c|ç|lorsqu|puisqu|jusqu|quoiqu|qu)['’`]/i, 'ELPFX'],
            [/^\d\d?[hm]\d\d\b/, 'HOUR'],
            [/^\d+(?:er|nd|e|de|ième|ème|eme)s?\b/, 'ORDINAL'],
            [/^-?\d+(?:[.,]\d+|)/, 'NUM'],
            [/^[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st]+(?:[’'`-][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st]+)*/, 'WORD']
        ]
}


class Tokenizer {

    constructor (sLang) {
        this.sLang = sLang;
        if (!aPatterns.hasOwnProperty(sLang)) {
            this.sLang = "default";
        }
        this.aRules = aPatterns[this.sLang];
    };


    * genTokens (sText) {
        let m;
        let i = 0;
        while (sText) {
            let nCut = 1;
            for (let [zRegex, sType] of this.aRules) {







            [/^&\w+;(?:\w+;|)/, 'HTMLENTITY'],
            [/^(?:l|d|n|m|t|s|j|c|ç|lorsqu|puisqu|jusqu|quoiqu|qu)['’`]/i, 'ELPFX'],
            [/^\d\d?[hm]\d\d\b/, 'HOUR'],
            [/^\d+(?:er|nd|e|de|ième|ème|eme)s?\b/, 'ORDINAL'],
            [/^-?\d+(?:[.,]\d+|)/, 'NUM'],
            [/^[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st]+(?:[’'`-][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st]+)*/, 'WORD']
        ]
};


class Tokenizer {

    constructor (sLang) {
        this.sLang = sLang;
        if (!aTkzPatterns.hasOwnProperty(sLang)) {
            this.sLang = "default";
        }
        this.aRules = aTkzPatterns[this.sLang];

    }

    * genTokens (sText) {
        let m;
        let i = 0;
        while (sText) {
            let nCut = 1;
            for (let [zRegex, sType] of this.aRules) {
                catch (e) {
                    helpers.logerror(e);
                }
            }
            i += nCut;
            sText = sText.slice(nCut);
        }
    };


    getSpellingErrors (sText, oDict) {
        let aSpellErr = [];
        for (let oToken of this.genTokens(sText)) {
            if (oToken.sType === 'WORD' && !oDict.isValidToken(oToken.sValue)) {
                aSpellErr.push(oToken);
            }







                catch (e) {
                    helpers.logerror(e);
                }
            }
            i += nCut;
            sText = sText.slice(nCut);
        }

    }

    getSpellingErrors (sText, oDict) {
        let aSpellErr = [];
        for (let oToken of this.genTokens(sText)) {
            if (oToken.sType === 'WORD' && !oDict.isValidToken(oToken.sValue)) {
                aSpellErr.push(oToken);
            }
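
genTokens() yields token objects carrying at least sType and sValue, which is all getSpellingErrors() reads from them. A minimal sketch of driving the tokenizer directly, assuming the Tokenizer class above is in scope (an unknown language tag simply falls back to the "default" pattern set):

let oTokenizer = new Tokenizer("fr");
for (let oToken of oTokenizer.genTokens("Voir http://example.org, svp.")) {
    console.log(oToken.sType + ": " + oToken.sValue);
}
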

Modified gc_core/py/lang_core/gc_engine.py from [4742310603] to [27a1e1120d].

## functions to get text outside pattern scope

# warning: check compile_rules.py to understand how it works

def nextword (s, iStart, n):
    "get the nth word of the input string or empty string"
    m = re.match("( +[\\w%-]+){" + str(n-1) + "} +([\\w%-]+)", s[iStart:])
    if not m:
        return None
    return (iStart+m.start(2), m.group(2))


def prevword (s, iEnd, n):
    "get the (-)nth word of the input string or empty string"
    m = re.search("([\\w%-]+) +([\\w%-]+ +){" + str(n-1) + "}$", s[:iEnd])
    if not m:
        return None
    return (m.start(1), m.group(1))


def nextword1 (s, iStart):
    "get next word (optimization)"








## functions to get text outside pattern scope

# warning: check compile_rules.py to understand how it works

def nextword (s, iStart, n):
    "get the nth word of the input string or empty string"
    m = re.match("(?: +[\\w%-]+){" + str(n-1) + "} +([\\w%-]+)", s[iStart:])
    if not m:
        return None
    return (iStart+m.start(1), m.group(1))


def prevword (s, iEnd, n):
    "get the (-)nth word of the input string or empty string"
    m = re.search("([\\w%-]+) +(?:[\\w%-]+ +){" + str(n-1) + "}$", s[:iEnd])
    if not m:
        return None
    return (m.start(1), m.group(1))


def nextword1 (s, iStart):
    "get next word (optimization)"

Modified gc_lang/fr/modules-js/conj.js from [4856ca7520] to [ab8249cda2].

// Grammalecte - Conjugueur
// License: GPL 3

"use strict";

${map}


let helpers = null; // module not loaded in Firefox content script

let _oData = {};
let _lVtyp = null;
let _lTags = null;
let _dPatternConj = {};
let _dVerb = {};


if (typeof(exports) !== 'undefined') {
    // used within Grammalecte library
    helpers = require("resource://grammalecte/helpers.js");
    _oData = JSON.parse(helpers.loadFile("resource://grammalecte/fr/conj_data.json"));
    _lVtyp = _oData.lVtyp;
    _lTags = _oData.lTags;
    _dPatternConj = _oData.dPatternConj;
    _dVerb = _oData.dVerb;
} else {
    // used within Firefox content script (conjugation panel).
    // can’t load JSON from here, so we do it in ui.js and send it here.
    self.port.on("provideConjData", function (sData) {
        _oData = JSON.parse(sData);
        _lVtyp = _oData.lVtyp;
        _lTags = _oData.lTags;
        _dPatternConj = _oData.dPatternConj;
        _dVerb = _oData.dVerb;
    });
}





const _zStartVoy = new RegExp("^[aeéiouœê]");
const _zNeedTeuph = new RegExp("[tdc]$");

const _dProSuj = new Map ([ [":1s", "je"], [":1ś", "je"], [":2s", "tu"], [":3s", "il"], [":1p", "nous"], [":2p", "vous"], [":3p", "ils"] ]);
const _dProObj = new Map ([ [":1s", "me "], [":1ś", "me "], [":2s", "te "], [":3s", "se "], [":1p", "nous "], [":2p", "vous "], [":3p", "se "] ]);
const _dProObjEl = new Map ([ [":1s", "m’"], [":1ś", "m’"], [":2s", "t’"], [":3s", "s’"], [":1p", "nous "], [":2p", "vous "], [":3p", "s’"] ]);
const _dImpePro = new Map ([ [":2s", "-toi"], [":1p", "-nous"], [":2p", "-vous"] ]);
const _dImpeProNeg = new Map ([ [":2s", "ne te "], [":1p", "ne nous "], [":2p", "ne vous "] ]);
const _dImpeProEn = new Map ([ [":2s", "-t’en"], [":1p", "-nous-en"], [":2p", "-vous-en"] ]);
const _dImpeProNegEn = new Map ([ [":2s", "ne t’en "], [":1p", "ne nous en "], [":2p", "ne vous en "] ]);

const _dGroup = new Map ([ ["0", "auxiliaire"], ["1", "1ᵉʳ groupe"], ["2", "2ᵉ groupe"], ["3", "3ᵉ groupe"] ]);

const _dTenseIdx = new Map ([ [":PQ", 0], [":Ip", 1], [":Iq", 2], [":Is", 3], [":If", 4], [":K", 5], [":Sp", 6], [":Sq", 7], [":E", 8] ]);


function isVerb (sVerb) {
    return _dVerb.hasOwnProperty(sVerb);
}

function getConj (sVerb, sTense, sWho) {
    // returns conjugation (can be an empty string)
    if (!_dVerb.hasOwnProperty(sVerb)) {
        return null;
    }
    if (!_dPatternConj[sTense][_lTags[_dVerb[sVerb][1]][_dTenseIdx.get(sTense)]].hasOwnProperty(sWho)) {
        return "";
    }
    return _modifyStringWithSuffixCode(sVerb, _dPatternConj[sTense][_lTags[_dVerb[sVerb][1]][_dTenseIdx.get(sTense)]][sWho]);
}

function hasConj (sVerb, sTense, sWho) {
    // returns false if no conjugation (also if empty) else true
    if (!_dVerb.hasOwnProperty(sVerb)) {
        return false;
    }
    if (_dPatternConj[sTense][_lTags[_dVerb[sVerb][1]][_dTenseIdx.get(sTense)]].hasOwnProperty(sWho)
            && _dPatternConj[sTense][_lTags[_dVerb[sVerb][1]][_dTenseIdx.get(sTense)]][sWho]) {
        return true;
    }
    return false;
}

function getVtyp (sVerb) {
    // returns raw informations about sVerb
    if (!_dVerb.hasOwnProperty(sVerb)) {
        return null;
    }
    return _lVtyp[_dVerb[sVerb][0]];
}

function getSimil (sWord, sMorph, sFilter=null) {
    if (!sMorph.includes(":V")) {
        return new Set();
    }
    let sInfi = sMorph.slice(1, sMorph.indexOf(" "));
    let tTags = _getTags(sInfi);
    let aSugg = new Set();
    if (sMorph.includes(":Q") || sMorph.includes(":Y")) {
        // we suggest conjugated forms
        if (sMorph.includes(":V1")) {
            aSugg.add(sInfi);
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Ip", ":3s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Ip", ":2p"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Iq", ":1s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Iq", ":3s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Iq", ":3p"));
        } else if (sMorph.includes(":V2")) {
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Ip", ":1s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Ip", ":3s"));
        } else if (sMorph.includes(":V3")) {
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Ip", ":1s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Ip", ":3s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Is", ":1s"));
            aSugg.add(_getConjWithTags(sInfi, tTags, ":Is", ":3s"));
        } else if (sMorph.includes(":V0a")) {
            aSugg.add("eus");
            aSugg.add("eut");
        } else {
            aSugg.add("étais");
            aSugg.add("était");
        }
        aSugg.delete("");
    } else {
        // we suggest past participles
        aSugg.add(_getConjWithTags(sInfi, tTags, ":PQ", ":Q1"));
        aSugg.add(_getConjWithTags(sInfi, tTags, ":PQ", ":Q2"));
        aSugg.add(_getConjWithTags(sInfi, tTags, ":PQ", ":Q3"));
        aSugg.add(_getConjWithTags(sInfi, tTags, ":PQ", ":Q4"));
        aSugg.delete("");
        // if there is only one past participle (epi inv), unreliable.
        if (aSugg.size === 1) {
            aSugg.clear();
        }
        if (sMorph.includes(":V1")) {
            aSugg.add(sInfi);
        }
    }
    return aSugg;
}


function _getTags (sVerb) {
    // returns tuple of tags (usable with functions _getConjWithTags and _hasConjWithTags)
    if (!_dVerb.hasOwnProperty(sVerb)) {
        return null;
    }
    return _lTags[_dVerb[sVerb][1]];
}

function _getConjWithTags (sVerb, tTags, sTense, sWho) {
    // returns conjugation (can be an empty string)
    if (!_dPatternConj[sTense][tTags[_dTenseIdx.get(sTense)]].hasOwnProperty(sWho)) {
        return "";
    }
    return _modifyStringWithSuffixCode(sVerb, _dPatternConj[sTense][tTags[_dTenseIdx.get(sTense)]][sWho]);
}

function _hasConjWithTags (tTags, sTense, sWho) {
    // returns false if no conjugation (also if empty) else true
    if (_dPatternConj[sTense][tTags[_dTenseIdx.get(sTense)]].hasOwnProperty(sWho)
            && _dPatternConj[sTense][tTags[_dTenseIdx.get(sTense)]][sWho]) {
        return true;
    }
    return false;
}

function _modifyStringWithSuffixCode (sWord, sSfx) {
    // returns sWord modified by sSfx
    if (sSfx === "") {
        return "";
    }
    if (sSfx === "0") {
        return sWord;
    }
    try {
        if (sSfx[0] !== '0') {
            return sWord.slice(0, -(sSfx.charCodeAt(0)-48)) + sSfx.slice(1); // 48 is the ASCII code for "0"
        } else {
            return sWord + sSfx.slice(1);
        }
    }
    catch (e) {
        console.log(e);
        return "## erreur, code : " + sSfx + " ##";
    }
}



class Verb {

    constructor (sVerb) {
        if (typeof sVerb !== "string" || sVerb === "") {
            throw new TypeError ("The value should be a non-empty string");
        }
        this.sVerb = sVerb;
        this.sVerbAux = "";
        this._sRawInfo = getVtyp(this.sVerb);
        this.sInfo = this._readableInfo(this._sRawInfo);
        this._tTags = _getTags(sVerb);
        this._tTagsAux = _getTags(this.sVerbAux);
        this.bProWithEn = (this._sRawInfo[5] === "e");
        this.dConj = new Map ([
            [":Y", new Map ([
                ["label", "Infinitif"],
                [":Y", sVerb]
            ])],
            [":PQ", new Map ([
                ["label", "Participes passés et présent"],
                [":Q1", _getConjWithTags(sVerb, this._tTags, ":PQ", ":Q1")],
                [":Q2", _getConjWithTags(sVerb, this._tTags, ":PQ", ":Q2")],
                [":Q3", _getConjWithTags(sVerb, this._tTags, ":PQ", ":Q3")],
                [":Q4", _getConjWithTags(sVerb, this._tTags, ":PQ", ":Q4")],
                [":P", _getConjWithTags(sVerb, this._tTags, ":PQ", ":P")]
            ])],
            [":Ip", new Map ([
                ["label", "Présent"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":Ip", ":1s")],
                [":1ś", _getConjWithTags(sVerb, this._tTags, ":Ip", ":1ś")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":Ip", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":Ip", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":Ip", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":Ip", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":Ip", ":3p")]
            ])],
            [":Iq", new Map ([
                ["label", "Imparfait"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":Iq", ":1s")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":Iq", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":Iq", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":Iq", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":Iq", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":Iq", ":3p")]
            ])],
            [":Is", new Map ([
                ["label", "Passé simple"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":Is", ":1s")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":Is", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":Is", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":Is", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":Is", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":Is", ":3p")]
            ])],
            [":If", new Map ([
                ["label", "Futur"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":If", ":1s")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":If", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":If", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":If", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":If", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":If", ":3p")]
            ])],
            [":Sp", new Map ([
                ["label", "Présent subjonctif"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":Sp", ":1s")],
                [":1ś", _getConjWithTags(sVerb, this._tTags, ":Sp", ":1ś")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":Sp", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":Sp", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":Sp", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":Sp", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":Sp", ":3p")]
            ])],
            [":Sq", new Map ([
                ["label", "Imparfait subjonctif"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":Sq", ":1s")],
                [":1ś", _getConjWithTags(sVerb, this._tTags, ":Sq", ":1ś")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":Sq", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":Sq", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":Sq", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":Sq", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":Sq", ":3p")]
            ])],
            [":K", new Map ([
                ["label", "Conditionnel"],
                [":1s", _getConjWithTags(sVerb, this._tTags, ":K", ":1s")],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":K", ":2s")],
                [":3s", _getConjWithTags(sVerb, this._tTags, ":K", ":3s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":K", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":K", ":2p")],
                [":3p", _getConjWithTags(sVerb, this._tTags, ":K", ":3p")]
            ])],
            [":E", new Map ([
                ["label", "Impératif"],
                [":2s", _getConjWithTags(sVerb, this._tTags, ":E", ":2s")],
                [":1p", _getConjWithTags(sVerb, this._tTags, ":E", ":1p")],
                [":2p", _getConjWithTags(sVerb, this._tTags, ":E", ":2p")]
            ])]
        ]);
    };


    _readableInfo () {
        // returns readable infos
        this.sVerbAux = (this._sRawInfo.slice(7,8) == "e") ? "être" : "avoir";
        let sGroup = _dGroup.get(this._sRawInfo[0]);
        let sInfo = "";
        if (this._sRawInfo.slice(3,4) == "t") {
            sInfo = "transitif";
        } else if (this._sRawInfo.slice(4,5) == "n") {
            sInfo = "transitif indirect";
        } else if (this._sRawInfo.slice(2,3) == "i") {
            sInfo = "intransitif";








<
|
<
|
|
|
|

|
<
<
<
<
<
<
<
<
|
<
<
<
|
|
|
|
|
<
|
>
>
|
>

|
|

|
|
|
|
|
|
|

|

|

<
|
|
|

|
|
|
|
|
|
|
|
|
|

|
|
|
|
|
|
|
|
|
|
|

|
|
|
|
|
|
|

|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|

|
<
|
|
|
|
|
|

|
|
|
|
|
|
|

|
|
|
|
|
|
|
|

|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
>










|

|
|








|
|
|
|
|



|
|
|
|
|
|
|



|
|
|
|
|
|



|
|
|
|
|
|



|
|
|
|
|
|



|
|
|
|
|
|
|



|
|
|
|
|
|
|



|
|
|
|
|
|



|
|
|


<
>




|







1
2
3
4
5
6
7
8

9

10
11
12
13
14
15








16



17
18
19
20
21

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42

43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128

129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274

275
276
277
278
279
280
281
282
283
284
285
286
287
// Grammalecte - Conjugueur
// License: GPL 3

"use strict";

${map}



var conj = {

    _lVtyp: [],
    _lTags: [],
    _dPatternConj: {},
    _dVerb: {},

    init: function (sJSONData) {
        try {
            let _oData = JSON.parse(sJSONData);
            this._lVtyp = _oData.lVtyp;
            this._lTags = _oData.lTags;
            this._dPatternConj = _oData.dPatternConj;
            this._dVerb = _oData.dVerb;

        }
        catch (e) {
            console.error(e);
        }
    },

    _zStartVoy: new RegExp("^[aeéiouœê]"),
    _zNeedTeuph: new RegExp("[tdc]$"),

    _dProSuj: new Map ([ [":1s", "je"], [":1ś", "je"], [":2s", "tu"], [":3s", "il"], [":1p", "nous"], [":2p", "vous"], [":3p", "ils"] ]),
    _dProObj: new Map ([ [":1s", "me "], [":1ś", "me "], [":2s", "te "], [":3s", "se "], [":1p", "nous "], [":2p", "vous "], [":3p", "se "] ]),
    _dProObjEl: new Map ([ [":1s", "m’"], [":1ś", "m’"], [":2s", "t’"], [":3s", "s’"], [":1p", "nous "], [":2p", "vous "], [":3p", "s’"] ]),
    _dImpePro: new Map ([ [":2s", "-toi"], [":1p", "-nous"], [":2p", "-vous"] ]),
    _dImpeProNeg: new Map ([ [":2s", "ne te "], [":1p", "ne nous "], [":2p", "ne vous "] ]),
    _dImpeProEn: new Map ([ [":2s", "-t’en"], [":1p", "-nous-en"], [":2p", "-vous-en"] ]),
    _dImpeProNegEn: new Map ([ [":2s", "ne t’en "], [":1p", "ne nous en "], [":2p", "ne vous en "] ]),

    _dGroup: new Map ([ ["0", "auxiliaire"], ["1", "1ᵉʳ groupe"], ["2", "2ᵉ groupe"], ["3", "3ᵉ groupe"] ]),

    _dTenseIdx: new Map ([ [":PQ", 0], [":Ip", 1], [":Iq", 2], [":Is", 3], [":If", 4], [":K", 5], [":Sp", 6], [":Sq", 7], [":E", 8] ]),


    isVerb: function (sVerb) {
        return this._dVerb.hasOwnProperty(sVerb);
    },

    getConj: function (sVerb, sTense, sWho) {
        // returns conjugation (can be an empty string)
        if (!this._dVerb.hasOwnProperty(sVerb)) {
            return null;
        }
        if (!this._dPatternConj[sTense][this._lTags[this._dVerb[sVerb][1]][this._dTenseIdx.get(sTense)]].hasOwnProperty(sWho)) {
            return "";
        }
        return this._modifyStringWithSuffixCode(sVerb, this._dPatternConj[sTense][this._lTags[this._dVerb[sVerb][1]][this._dTenseIdx.get(sTense)]][sWho]);
    },

    hasConj: function (sVerb, sTense, sWho) {
        // returns false if no conjugation (also if empty) else true
        if (!this._dVerb.hasOwnProperty(sVerb)) {
            return false;
        }
        if (this._dPatternConj[sTense][this._lTags[this._dVerb[sVerb][1]][this._dTenseIdx.get(sTense)]].hasOwnProperty(sWho)
                && this._dPatternConj[sTense][this._lTags[this._dVerb[sVerb][1]][this._dTenseIdx.get(sTense)]][sWho]) {
            return true;
        }
        return false;
    },

    getVtyp: function (sVerb) {
        // returns raw information about sVerb
        if (!this._dVerb.hasOwnProperty(sVerb)) {
            return null;
        }
        return this._lVtyp[this._dVerb[sVerb][0]];
    },

    getSimil: function (sWord, sMorph, sFilter=null) {
        if (!sMorph.includes(":V")) {
            return new Set();
        }
        let sInfi = sMorph.slice(1, sMorph.indexOf(" "));
        let tTags = this._getTags(sInfi);
        let aSugg = new Set();
        if (sMorph.includes(":Q") || sMorph.includes(":Y")) {
            // we suggest conjugated forms
            if (sMorph.includes(":V1")) {
                aSugg.add(sInfi);
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Ip", ":3s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Ip", ":2p"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Iq", ":1s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Iq", ":3s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Iq", ":3p"));
            } else if (sMorph.includes(":V2")) {
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Ip", ":1s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Ip", ":3s"));
            } else if (sMorph.includes(":V3")) {
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Ip", ":1s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Ip", ":3s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Is", ":1s"));
                aSugg.add(this._getConjWithTags(sInfi, tTags, ":Is", ":3s"));
            } else if (sMorph.includes(":V0a")) {
                aSugg.add("eus");
                aSugg.add("eut");
            } else {
                aSugg.add("étais");
                aSugg.add("était");
            }
            aSugg.delete("");
        } else {
            // we suggest past participles
            aSugg.add(this._getConjWithTags(sInfi, tTags, ":PQ", ":Q1"));
            aSugg.add(this._getConjWithTags(sInfi, tTags, ":PQ", ":Q2"));
            aSugg.add(this._getConjWithTags(sInfi, tTags, ":PQ", ":Q3"));
            aSugg.add(this._getConjWithTags(sInfi, tTags, ":PQ", ":Q4"));
            aSugg.delete("");
            // if there is only one past participle (epi inv), unreliable.
            if (aSugg.size === 1) {
                aSugg.clear();
            }
            if (sMorph.includes(":V1")) {
                aSugg.add(sInfi);
            }
        }
        return aSugg;
    },

    _getTags: function (sVerb) {

        // returns tuple of tags (usable with functions _getConjWithTags and _hasConjWithTags)
        if (!this._dVerb.hasOwnProperty(sVerb)) {
            return null;
        }
        return this._lTags[this._dVerb[sVerb][1]];
    },

    _getConjWithTags: function (sVerb, tTags, sTense, sWho) {
        // returns conjugation (can be an empty string)
        if (!this._dPatternConj[sTense][tTags[this._dTenseIdx.get(sTense)]].hasOwnProperty(sWho)) {
            return "";
        }
        return this._modifyStringWithSuffixCode(sVerb, this._dPatternConj[sTense][tTags[this._dTenseIdx.get(sTense)]][sWho]);
    },

    _hasConjWithTags: function (tTags, sTense, sWho) {
        // returns false if no conjugation (also if empty) else true
        if (this._dPatternConj[sTense][tTags[this._dTenseIdx.get(sTense)]].hasOwnProperty(sWho)
                && this._dPatternConj[sTense][tTags[this._dTenseIdx.get(sTense)]][sWho]) {
            return true;
        }
        return false;
    },

    _modifyStringWithSuffixCode: function (sWord, sSfx) {
        // returns sWord modified by sSfx
        if (sSfx === "") {
            return "";
        }
        if (sSfx === "0") {
            return sWord;
        }
        try {
            if (sSfx[0] !== '0') {
                return sWord.slice(0, -(sSfx.charCodeAt(0)-48)) + sSfx.slice(1); // 48 is the ASCII code for "0"
            } else {
                return sWord + sSfx.slice(1);
            }
        }
        catch (e) {
            console.log(e);
            return "## erreur, code : " + sSfx + " ##";
        }
    }
};
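// Usage sketch (illustrative; the verb and suffix codes below are assumed examples):
// the first character of a suffix code is the number of trailing characters to strip
// from the verb ("0" = strip nothing), the rest is the ending to append.
//     conj._modifyStringWithSuffixCode("parler", "2ons");  // -> "parlons" (strip "er", append "ons")
//     conj._modifyStringWithSuffixCode("parler", "2ez");   // -> "parlez"
//     conj._modifyStringWithSuffixCode("parler", "0");     // -> "parler" (unchanged)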


class Verb {

    constructor (sVerb) {
        if (typeof sVerb !== "string" || sVerb === "") {
            throw new TypeError ("The value should be a non-empty string");
        }
        this.sVerb = sVerb;
        this.sVerbAux = "";
        this._sRawInfo = conj.getVtyp(this.sVerb);
        this.sInfo = this._readableInfo(this._sRawInfo);
        this._tTags = conj._getTags(sVerb);
        this._tTagsAux = conj._getTags(this.sVerbAux);
        this.bProWithEn = (this._sRawInfo[5] === "e");
        this.dConj = new Map ([
            [":Y", new Map ([
                ["label", "Infinitif"],
                [":Y", sVerb]
            ])],
            [":PQ", new Map ([
                ["label", "Participes passés et présent"],
                [":Q1", conj._getConjWithTags(sVerb, this._tTags, ":PQ", ":Q1")],
                [":Q2", conj._getConjWithTags(sVerb, this._tTags, ":PQ", ":Q2")],
                [":Q3", conj._getConjWithTags(sVerb, this._tTags, ":PQ", ":Q3")],
                [":Q4", conj._getConjWithTags(sVerb, this._tTags, ":PQ", ":Q4")],
                [":P", conj._getConjWithTags(sVerb, this._tTags, ":PQ", ":P")]
            ])],
            [":Ip", new Map ([
                ["label", "Présent"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":1s")],
                [":1ś", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":1ś")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":Ip", ":3p")]
            ])],
            [":Iq", new Map ([
                ["label", "Imparfait"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":Iq", ":1s")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":Iq", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":Iq", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":Iq", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":Iq", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":Iq", ":3p")]
            ])],
            [":Is", new Map ([
                ["label", "Passé simple"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":Is", ":1s")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":Is", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":Is", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":Is", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":Is", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":Is", ":3p")]
            ])],
            [":If", new Map ([
                ["label", "Futur"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":If", ":1s")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":If", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":If", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":If", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":If", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":If", ":3p")]
            ])],
            [":Sp", new Map ([
                ["label", "Présent subjonctif"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":1s")],
                [":1ś", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":1ś")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":Sp", ":3p")]
            ])],
            [":Sq", new Map ([
                ["label", "Imparfait subjonctif"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":1s")],
                [":1ś", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":1ś")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":Sq", ":3p")]
            ])],
            [":K", new Map ([
                ["label", "Conditionnel"],
                [":1s", conj._getConjWithTags(sVerb, this._tTags, ":K", ":1s")],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":K", ":2s")],
                [":3s", conj._getConjWithTags(sVerb, this._tTags, ":K", ":3s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":K", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":K", ":2p")],
                [":3p", conj._getConjWithTags(sVerb, this._tTags, ":K", ":3p")]
            ])],
            [":E", new Map ([
                ["label", "Impératif"],
                [":2s", conj._getConjWithTags(sVerb, this._tTags, ":E", ":2s")],
                [":1p", conj._getConjWithTags(sVerb, this._tTags, ":E", ":1p")],
                [":2p", conj._getConjWithTags(sVerb, this._tTags, ":E", ":2p")]
            ])]
        ]);

    }

    _readableInfo () {
        // returns readable infos
        this.sVerbAux = (this._sRawInfo.slice(7,8) == "e") ? "être" : "avoir";
        let sGroup = conj._dGroup.get(this._sRawInfo[0]);
        let sInfo = "";
        if (this._sRawInfo.slice(3,4) == "t") {
            sInfo = "transitif";
        } else if (this._sRawInfo.slice(4,5) == "n") {
            sInfo = "transitif indirect";
        } else if (this._sRawInfo.slice(2,3) == "i") {
            sInfo = "intransitif";
        if (this._sRawInfo.slice(6,7) == "m") {
            sInfo = sInfo + " impersonnel";
        }
        if (sInfo === "") {
            sInfo = "# erreur - code : " + this._sRawInfo;
        }
        return sGroup + " · " + sInfo;
    };


    infinitif (bPro, bNeg, bTpsCo, bInt, bFem) {
        let sInfi;
        if (bTpsCo) {
            sInfi = (bPro) ? "être" : this.sVerbAux;
        } else {
            sInfi = this.sVerb;
        }
        if (bPro) {
            if (this.bProWithEn) {
                sInfi = "s’en " + sInfi;
            } else {
                sInfi = (_zStartVoy.test(sInfi)) ? "s’" + sInfi : "se " + sInfi;
            }
        }
        if (bNeg) {
            sInfi = "ne pas " + sInfi;
        }
        if (bTpsCo) {
            sInfi += " " + this._seekPpas(bPro, bFem, (this._sRawInfo[5] == "r"));
        }
        if (bInt) {
            sInfi += " … ?";
        }
        return sInfi;
    };


    participePasse (sWho) {
        return this.dConj.get(":PQ").get(sWho);
    };


    participePresent (bPro, bNeg, bTpsCo, bInt, bFem) {
        if (!this.dConj.get(":PQ").get(":P")) {
            return "";
        }
        let sPartPre;
        if (bTpsCo) {
            sPartPre = (!bPro) ? _getConjWithTags(this.sVerbAux, this._tTagsAux, ":PQ", ":P") : getConj("être", ":PQ", ":P");
        } else {
            sPartPre = this.dConj.get(":PQ").get(":P");
        }
        if (sPartPre === "") {
            return "";
        }
        let bEli = _zStartVoy.test(sPartPre);
        if (bPro) {
            if (this.bProWithEn) {
                sPartPre = "s’en " + sPartPre;
            } else {
                sPartPre = (bEli) ? "s’" + sPartPre : "se " + sPartPre;
            }
        }
        if (bNeg) {
            sPartPre = (bEli && !bPro) ? "n’" + sPartPre + " pas" : "ne " + sPartPre + " pas";
        }
        if (bTpsCo) {
            sPartPre += " " + this._seekPpas(bPro, bFem, this._sRawInfo[5] == "r");
        }
        if (bInt) {
            sPartPre += " … ?";
        }
        return sPartPre;
    };


    conjugue (sTemps, sWho, bPro, bNeg, bTpsCo, bInt, bFem) {
        if (!this.dConj.get(sTemps).get(sWho)) {
            return "";
        }
        let sConj;
        if (!bTpsCo && bInt && sWho == ":1s" && this.dConj.get(sTemps).gl_get(":1ś", false)) {
            sWho = ":1ś";
        }
        if (bTpsCo) {
            sConj = (!bPro) ? _getConjWithTags(this.sVerbAux, this._tTagsAux, sTemps, sWho) : getConj("être", sTemps, sWho);
        } else {
            sConj = this.dConj.get(sTemps).get(sWho);
        }
        if (sConj === "") {
            return "";
        }
        let bEli = _zStartVoy.test(sConj);
        if (bPro) {
            if (!this.bProWithEn) {
                sConj = (bEli) ? _dProObjEl.get(sWho) + sConj : _dProObj.get(sWho) + sConj;
            } else {
                sConj = _dProObjEl.get(sWho) + "en " + sConj;
            }
        }
        if (bNeg) {
            sConj = (bEli && !bPro) ? "n’" + sConj : "ne " + sConj;
        }
        if (bInt) {
            if (sWho == ":3s" && !_zNeedTeuph.test(sConj)) {
                sConj += "-t";
            }
            sConj += "-" + this._getPronom(sWho, bFem);
        } else {
            if (sWho == ":1s" && bEli && !bNeg && !bPro) {
                sConj = "j’" + sConj;
            } else {
                sConj = this._getPronom(sWho, bFem) + " " + sConj;
            }
        }
        if (bNeg) {
            sConj += " pas";
        }
        if (bTpsCo) {
            sConj += " " + this._seekPpas(bPro, bFem, sWho.endsWith("p") || this._sRawInfo[5] == "r");
        }
        if (bInt) {
            sConj += " … ?";
        }
        return sConj;
    };


    _getPronom (sWho, bFem) {
        if (sWho == ":3s") {
            if (this._sRawInfo[5] == "r") {
                return "on";
            } else if (bFem) {
                return "elle";
            }
        } else if (sWho == ":3p" && bFem) {
            return "elles";
        }
        return _dProSuj.get(sWho);
    };


    imperatif (sWho, bPro, bNeg, bTpsCo, bFem) {
        if (!this.dConj.get(":E").get(sWho)) {
            return "";
        }
        let sImpe;
        if (bTpsCo) {
            sImpe = (!bPro) ? _getConjWithTags(this.sVerbAux, this._tTagsAux, ":E", sWho) : getConj("être", ":E", sWho);
        } else {
            sImpe = this.dConj.get(":E").get(sWho);
        }
        if (sImpe === "") {
            return "";
        }
        let bEli = _zStartVoy.test(sImpe);
        if (bNeg) {
            if (bPro) {
                if (!this.bProWithEn) {
                    sImpe = (bEli && sWho == ":2s") ? "ne t’" + sImpe + " pas" : _dImpeProNeg.get(sWho) + sImpe + " pas";
                } else {
                    sImpe = _dImpeProNegEn.get(sWho) + sImpe + " pas";
                }
            } else {
                sImpe = (bEli) ? "n’" + sImpe + " pas" : "ne " + sImpe + " pas";
            }
        } else if (bPro) {
            sImpe = (this.bProWithEn) ? sImpe + _dImpeProEn.get(sWho) : sImpe + _dImpePro.get(sWho);
        }
        if (bTpsCo) {
            return sImpe + " " + this._seekPpas(bPro, bFem, sWho.endsWith("p") || this._sRawInfo[5] == "r");
        }
        return sImpe;
    };


    _seekPpas (bPro, bFem, bPlur) {
        if (!bPro && this.sVerbAux == "avoir") {
            return this.dConj.get(":PQ").get(":Q1");
        }
        if (!bFem) {
            return (bPlur && this.dConj.get(":PQ").get(":Q2")) ? this.dConj.get(":PQ").get(":Q2") : this.dConj.get(":PQ").get(":Q1");
        }
        if (!bPlur) {
            return (this.dConj.get(":PQ").get(":Q3")) ? this.dConj.get(":PQ").get(":Q3") : this.dConj.get(":PQ").get(":Q1");
        }
        return (this.dConj.get(":PQ").get(":Q4")) ? this.dConj.get(":PQ").get(":Q4") : this.dConj.get(":PQ").get(":Q1");
    }
}



if (typeof(exports) !== 'undefined') {
    // Used for Grammalecte library.
    // In content scripts, these variables are directly reachable
    exports.Verb = Verb;
    exports.isVerb = isVerb;
    exports.getConj = getConj;
    exports.hasConj = hasConj;
    exports.getVtyp = getVtyp;
    exports.getSimil = getSimil;
    exports._getTags = _getTags;
    exports._hasConjWithTags = _hasConjWithTags;
    exports._getConjWithTags = _getConjWithTags;
}

        if (this._sRawInfo.slice(6,7) == "m") {
            sInfo = sInfo + " impersonnel";
        }
        if (sInfo === "") {
            sInfo = "# erreur - code : " + this._sRawInfo;
        }
        return sGroup + " · " + sInfo;

    }

    infinitif (bPro, bNeg, bTpsCo, bInt, bFem) {
        let sInfi;
        if (bTpsCo) {
            sInfi = (bPro) ? "être" : this.sVerbAux;
        } else {
            sInfi = this.sVerb;
        }
        if (bPro) {
            if (this.bProWithEn) {
                sInfi = "s’en " + sInfi;
            } else {
                sInfi = (conj._zStartVoy.test(sInfi)) ? "s’" + sInfi : "se " + sInfi;
            }
        }
        if (bNeg) {
            sInfi = "ne pas " + sInfi;
        }
        if (bTpsCo) {
            sInfi += " " + this._seekPpas(bPro, bFem, (this._sRawInfo[5] == "r"));
        }
        if (bInt) {
            sInfi += " … ?";
        }
        return sInfi;

    }

    participePasse (sWho) {
        return this.dConj.get(":PQ").get(sWho);

    }

    participePresent (bPro, bNeg, bTpsCo, bInt, bFem) {
        if (!this.dConj.get(":PQ").get(":P")) {
            return "";
        }
        let sPartPre;
        if (bTpsCo) {
            sPartPre = (!bPro) ? conj._getConjWithTags(this.sVerbAux, this._tTagsAux, ":PQ", ":P") : conj.getConj("être", ":PQ", ":P");
        } else {
            sPartPre = this.dConj.get(":PQ").get(":P");
        }
        if (sPartPre === "") {
            return "";
        }
        let bEli = conj._zStartVoy.test(sPartPre);
        if (bPro) {
            if (this.bProWithEn) {
                sPartPre = "s’en " + sPartPre;
            } else {
                sPartPre = (bEli) ? "s’" + sPartPre : "se " + sPartPre;
            }
        }
        if (bNeg) {
            sPartPre = (bEli && !bPro) ? "n’" + sPartPre + " pas" : "ne " + sPartPre + " pas";
        }
        if (bTpsCo) {
            sPartPre += " " + this._seekPpas(bPro, bFem, this._sRawInfo[5] == "r");
        }
        if (bInt) {
            sPartPre += " … ?";
        }
        return sPartPre;

    }

    conjugue (sTemps, sWho, bPro, bNeg, bTpsCo, bInt, bFem) {
        if (!this.dConj.get(sTemps).get(sWho)) {
            return "";
        }
        let sConj;
        if (!bTpsCo && bInt && sWho == ":1s" && this.dConj.get(sTemps).gl_get(":1ś", false)) {
            sWho = ":1ś";
        }
        if (bTpsCo) {
            sConj = (!bPro) ? conj._getConjWithTags(this.sVerbAux, this._tTagsAux, sTemps, sWho) : conj.getConj("être", sTemps, sWho);
        } else {
            sConj = this.dConj.get(sTemps).get(sWho);
        }
        if (sConj === "") {
            return "";
        }
        let bEli = conj._zStartVoy.test(sConj);
        if (bPro) {
            if (!this.bProWithEn) {
                sConj = (bEli) ? conj._dProObjEl.get(sWho) + sConj : conj._dProObj.get(sWho) + sConj;
            } else {
                sConj = conj._dProObjEl.get(sWho) + "en " + sConj;
            }
        }
        if (bNeg) {
            sConj = (bEli && !bPro) ? "n’" + sConj : "ne " + sConj;
        }
        if (bInt) {
            if (sWho == ":3s" && !conj._zNeedTeuph.test(sConj)) {
                sConj += "-t";
            }
            sConj += "-" + this._getPronom(sWho, bFem);
        } else {
            if (sWho == ":1s" && bEli && !bNeg && !bPro) {
                sConj = "j’" + sConj;
            } else {
                sConj = this._getPronom(sWho, bFem) + " " + sConj;
            }
        }
        if (bNeg) {
            sConj += " pas";
        }
        if (bTpsCo) {
            sConj += " " + this._seekPpas(bPro, bFem, sWho.endsWith("p") || this._sRawInfo[5] == "r");
        }
        if (bInt) {
            sConj += " … ?";
        }
        return sConj;

    }

    _getPronom (sWho, bFem) {
        if (sWho == ":3s") {
            if (this._sRawInfo[5] == "r") {
                return "on";
            } else if (bFem) {
                return "elle";
            }
        } else if (sWho == ":3p" && bFem) {
            return "elles";
        }
        return conj._dProSuj.get(sWho);

    }

    imperatif (sWho, bPro, bNeg, bTpsCo, bFem) {
        if (!this.dConj.get(":E").get(sWho)) {
            return "";
        }
        let sImpe;
        if (bTpsCo) {
            sImpe = (!bPro) ? conj._getConjWithTags(this.sVerbAux, this._tTagsAux, ":E", sWho) : conj.getConj("être", ":E", sWho);
        } else {
            sImpe = this.dConj.get(":E").get(sWho);
        }
        if (sImpe === "") {
            return "";
        }
        let bEli = conj._zStartVoy.test(sImpe);
        if (bNeg) {
            if (bPro) {
                if (!this.bProWithEn) {
                    sImpe = (bEli && sWho == ":2s") ? "ne t’" + sImpe + " pas" : conj._dImpeProNeg.get(sWho) + sImpe + " pas";
                } else {
                    sImpe = conj._dImpeProNegEn.get(sWho) + sImpe + " pas";
                }
            } else {
                sImpe = (bEli) ? "n’" + sImpe + " pas" : "ne " + sImpe + " pas";
            }
        } else if (bPro) {
            sImpe = (this.bProWithEn) ? sImpe + conj._dImpeProEn.get(sWho) : sImpe + conj._dImpePro.get(sWho);
        }
        if (bTpsCo) {
            return sImpe + " " + this._seekPpas(bPro, bFem, sWho.endsWith("p") || this._sRawInfo[5] == "r");
        }
        return sImpe;

    }

    _seekPpas (bPro, bFem, bPlur) {
        if (!bPro && this.sVerbAux == "avoir") {
            return this.dConj.get(":PQ").get(":Q1");
        }
        if (!bFem) {
            return (bPlur && this.dConj.get(":PQ").get(":Q2")) ? this.dConj.get(":PQ").get(":Q2") : this.dConj.get(":PQ").get(":Q1");
        }
        if (!bPlur) {
            return (this.dConj.get(":PQ").get(":Q3")) ? this.dConj.get(":PQ").get(":Q3") : this.dConj.get(":PQ").get(":Q1");
        }
        return (this.dConj.get(":PQ").get(":Q4")) ? this.dConj.get(":PQ").get(":Q4") : this.dConj.get(":PQ").get(":Q1");
    }
}
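// Usage sketch (illustrative): building a Verb once conj.init() has loaded conj_data.json.
// The expected outputs assume a regular 1st-group verb present in the data.
//     let oVerb = new Verb("parler");
//     oVerb.infinitif(false, false, false, false, false);               // "parler"
//     oVerb.conjugue(":Ip", ":1s", false, false, false, false, false);  // "je parle"
//     oVerb.imperatif(":2p", false, false, false, false);               // "parlez"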


// Initialization
if (typeof(browser) !== 'undefined') {
    // WebExtension (but not in Worker)
    conj.init(helpers.loadFile(browser.extension.getURL("grammalecte/fr/conj_data.json")));
} else if (typeof(require) !== 'undefined') {
    // Add-on SDK and Thunderbird
    let helpers = require("resource://grammalecte/helpers.js");
    conj.init(helpers.loadFile("resource://grammalecte/fr/conj_data.json"));
} else if (typeof(self) !== 'undefined' && typeof(self.port) !== 'undefined' && typeof(self.port.on) !== "undefined") {
    // used within Firefox content script (conjugation panel).
    // can’t load JSON from here, so we do it in ui.js and send it here.
    self.port.on("provideConjData", function (sJSONData) {
        conj.init(sJSONData);
    });
} else {
    console.log("Module conj non initialisé");
}


if (typeof(exports) !== 'undefined') {
    exports._lVtyp = conj._lVtyp;
    exports._lTags = conj._lTags;
    exports._dPatternConj = conj._dPatternConj;
    exports._dVerb = conj._dVerb;
    exports.init = conj.init;
    exports._zStartVoy = conj._zStartVoy;
    exports._zNeedTeuph = conj._zNeedTeuph;
    exports._dProSuj = conj._dProSuj;
    exports._dProObj = conj._dProObj;
    exports._dProObjEl = conj._dProObjEl;
    exports._dImpePro = conj._dImpePro;
    exports._dImpeProNeg = conj._dImpeProNeg;
    exports._dImpeProEn = conj._dImpeProEn;
    exports._dImpeProNegEn = conj._dImpeProNegEn;
    exports._dGroup = conj._dGroup;
    exports._dTenseIdx = conj._dTenseIdx;
    exports.isVerb = conj.isVerb;
    exports.getConj = conj.getConj;
    exports.hasConj = conj.hasConj;
    exports.getVtyp = conj.getVtyp;
    exports.getSimil = conj.getSimil;
    exports._getTags = conj._getTags;
    exports._getConjWithTags = conj._getConjWithTags;
    exports._hasConjWithTags = conj._hasConjWithTags;
    exports._modifyStringWithSuffixCode = conj._modifyStringWithSuffixCode;
    exports.Verb = Verb;
}

Modified gc_lang/fr/modules-js/cregex.js from [266ea45a85] to [7faa9e5e5e].

//// Grammalecte - Compiled regular expressions



///// Lemme
const zLemma = new RegExp("^>([a-zà-öø-ÿ0-9Ā-ʯ][a-zà-öø-ÿ0-9Ā-ʯ-]+)");

///// Masculin / féminin / singulier / pluriel
const zGender = new RegExp(":[mfe]");
const zNumber = new RegExp(":[spi]");

///// Nom et adjectif
const zNA = new RegExp(":[NA]");

//// nombre
const zNAs = new RegExp(":[NA].*:s");
const zNAp = new RegExp(":[NA].*:p");
const zNAi = new RegExp(":[NA].*:i");
const zNAsi = new RegExp(":[NA].*:[si]");
const zNApi = new RegExp(":[NA].*:[pi]");

//// genre
const zNAm = new RegExp(":[NA].*:m");
const zNAf = new RegExp(":[NA].*:f");
const zNAe = new RegExp(":[NA].*:e");
const zNAme = new RegExp(":[NA].*:[me]");
const zNAfe = new RegExp(":[NA].*:[fe]");

//// nombre et genre
// singulier
const zNAms = new RegExp(":[NA].*:m.*:s");
const zNAfs = new RegExp(":[NA].*:f.*:s");
const zNAes = new RegExp(":[NA].*:e.*:s");
const zNAmes = new RegExp(":[NA].*:[me].*:s");
const zNAfes = new RegExp(":[NA].*:[fe].*:s");

// singulier et invariable
const zNAmsi = new RegExp(":[NA].*:m.*:[si]");
const zNAfsi = new RegExp(":[NA].*:f.*:[si]");
const zNAesi = new RegExp(":[NA].*:e.*:[si]");
const zNAmesi = new RegExp(":[NA].*:[me].*:[si]");
const zNAfesi = new RegExp(":[NA].*:[fe].*:[si]");

// pluriel
const zNAmp = new RegExp(":[NA].*:m.*:p");
const zNAfp = new RegExp(":[NA].*:f.*:p");
const zNAep = new RegExp(":[NA].*:e.*:p");
const zNAmep = new RegExp(":[NA].*:[me].*:p");
const zNAfep = new RegExp(":[NA].*:[fe].*:p");

// pluriel et invariable
const zNAmpi = new RegExp(":[NA].*:m.*:[pi]");
const zNAfpi = new RegExp(":[NA].*:f.*:[pi]");
const zNAepi = new RegExp(":[NA].*:e.*:[pi]");
const zNAmepi = new RegExp(":[NA].*:[me].*:[pi]");
const zNAfepi = new RegExp(":[NA].*:[fe].*:[pi]");

//// Divers
const zAD = new RegExp(":[AB]");

///// Verbe
const zVconj = new RegExp(":[123][sp]");
const zVconj123 = new RegExp(":V[123].*:[123][sp]");

///// Nom | Adjectif | Verbe
const zNVconj = new RegExp(":(?:N|[123][sp])");
const zNAVconj = new RegExp(":(?:N|A|[123][sp])");

///// Spécifique
const zNnotA = new RegExp(":N(?!:A)");
const zPNnotA = new RegExp(":(?:N(?!:A)|Q)");

///// Noms propres
const zNP = new RegExp(":(?:M[12P]|T)");
const zNPm = new RegExp(":(?:M[12P]|T):m");
const zNPf = new RegExp(":(?:M[12P]|T):f");
const zNPe = new RegExp(":(?:M[12P]|T):e");


///// FONCTIONS

function getLemmaOfMorph (sMorph) {
    return zLemma.exec(sMorph)[1];
}

function checkAgreement (l1, l2) {
    // check number agreement
    if (!mbInv(l1) && !mbInv(l2)) {
        if (mbSg(l1) && !mbSg(l2)) {
            return false;
        }
        if (mbPl(l1) && !mbPl(l2)) {
            return false;
        }
    }
    // check gender agreement
    if (mbEpi(l1) || mbEpi(l2)) {
        return true;
    }
    if (mbMas(l1) && !mbMas(l2)) {
        return false;
    }
    if (mbFem(l1) && !mbFem(l2)) {
        return false;
    }
    return true;
}

function checkConjVerb (lMorph, sReqConj) {
    return lMorph.some(s  =>  s.includes(sReqConj));
}

function getGender (lMorph) {
    // returns gender of word (':m', ':f', ':e' or empty string).
    let sGender = "";
    for (let sMorph of lMorph) {
        let m = zGender.exec(sMorph);
        if (m) {
            if (!sGender) {
                sGender = m[0];
            } else if (sGender != m[0]) {
                return ":e";
            }
        }
    }
    return sGender;
}

function getNumber (lMorph) {
    // returns number of word (':s', ':p', ':i' or empty string).
    let sNumber = "";
    for (let sMorph of lMorph) {
        let m = zNumber.exec(sMorph);
        if (m) {
            if (!sNumber) {
                sNumber = m[0];
            } else if (sNumber != m[0]) {
                return ":i";
            }
        }
    }
    return sNumber;
}

// NOTE :  isWhat (lMorph)    returns true   if lMorph contains nothing else than What
//         mbWhat (lMorph)    returns true   if lMorph contains What at least once

//// isXXX = it’s certain

function isNom (lMorph) {
    return lMorph.every(s  =>  s.includes(":N"));
}

function isNomNotAdj (lMorph) {
    return lMorph.every(s  =>  zNnotA.test(s));
}

function isAdj (lMorph) {
    return lMorph.every(s  =>  s.includes(":A"));
}

function isNomAdj (lMorph) {
    return lMorph.every(s  =>  zNA.test(s));
}

function isNomVconj (lMorph) {
    return lMorph.every(s  =>  zNVconj.test(s));
}

function isInv (lMorph) {
    return lMorph.every(s  =>  s.includes(":i"));
}
function isSg (lMorph) {
    return lMorph.every(s  =>  s.includes(":s"));
}
function isPl (lMorph) {
    return lMorph.every(s  =>  s.includes(":p"));
}
function isEpi (lMorph) {
    return lMorph.every(s  =>  s.includes(":e"));
}
function isMas (lMorph) {
    return lMorph.every(s  =>  s.includes(":m"));
}
function isFem (lMorph) {
    return lMorph.every(s  =>  s.includes(":f"));
}


//// mbXXX = MAYBE XXX

function mbNom (lMorph) {
    return lMorph.some(s  =>  s.includes(":N"));
}

function mbAdj (lMorph) {
    return lMorph.some(s  =>  s.includes(":A"));
}

function mbAdjNb (lMorph) {
    return lMorph.some(s  =>  zAD.test(s));
}

function mbNomAdj (lMorph) {
    return lMorph.some(s  =>  zNA.test(s));
}

function mbNomNotAdj (lMorph) {
    let b = false;
    for (let s of lMorph) {
        if (s.includes(":A")) {
            return false;
        }
        if (s.includes(":N")) {
            b = true;
        }
    }
    return b;
}

function mbPpasNomNotAdj (lMorph) {
    return lMorph.some(s  =>  zPNnotA.test(s));
}

function mbVconj (lMorph) {
    return lMorph.some(s  =>  zVconj.test(s));
}

function mbVconj123 (lMorph) {
    return lMorph.some(s  =>  zVconj123.test(s));
}

function mbMG (lMorph) {
    return lMorph.some(s  =>  s.includes(":G"));
}

function mbInv (lMorph) {
    return lMorph.some(s  =>  s.includes(":i"));
}
function mbSg (lMorph) {
    return lMorph.some(s  =>  s.includes(":s"));
}
function mbPl (lMorph) {
    return lMorph.some(s  =>  s.includes(":p"));
}
function mbEpi (lMorph) {
    return lMorph.some(s  =>  s.includes(":e"));
}
function mbMas (lMorph) {
    return lMorph.some(s  =>  s.includes(":m"));
}
function mbFem (lMorph) {
    return lMorph.some(s  =>  s.includes(":f"));
}

function mbNpr (lMorph) {
    return lMorph.some(s  =>  zNP.test(s));
}

function mbNprMasNotFem (lMorph) {
    if (lMorph.some(s  =>  zNPf.test(s))) {
        return false;
    }
    return lMorph.some(s  =>  zNPm.test(s));
}



if (typeof(exports) !== 'undefined') {
    exports.getLemmaOfMorph = getLemmaOfMorph;
    exports.checkAgreement = checkAgreement;
    exports.checkConjVerb = checkConjVerb;
    exports.getGender = getGender;
    exports.getNumber = getNumber;
    exports.isNom = isNom;
    exports.isNomNotAdj = isNomNotAdj;
    exports.isAdj = isAdj;
    exports.isNomAdj = isNomAdj;
    exports.isNomVconj = isNomVconj;
    exports.isInv = isInv;
    exports.isSg = isSg;
    exports.isPl = isPl;
    exports.isEpi = isEpi;
    exports.isMas = isMas;
    exports.isFem = isFem;
    exports.mbNom = mbNom;
    exports.mbAdj = mbAdj;
    exports.mbAdjNb = mbAdjNb;
    exports.mbNomAdj = mbNomAdj;
    exports.mbNomNotAdj = mbNomNotAdj;
    exports.mbPpasNomNotAdj = mbPpasNomNotAdj;
    exports.mbVconj = mbVconj;
    exports.mbVconj123 = mbVconj123;
    exports.mbMG = mbMG;
    exports.mbInv = mbInv;
    exports.mbSg = mbSg;
    exports.mbPl = mbPl;
    exports.mbEpi = mbEpi;
    exports.mbMas = mbMas;
    exports.mbFem = mbFem;
    exports.mbNpr = mbNpr;
    exports.mbNprMasNotFem = mbNprMasNotFem;
}



//// Grammalecte - Compiled regular expressions


var cregex = {
    ///// Lemme
    _zLemma: new RegExp(">([a-zà-öø-ÿ0-9Ā-ʯ][a-zà-öø-ÿ0-9Ā-ʯ-]+)"),

    ///// Masculin / féminin / singulier / pluriel
    _zGender: new RegExp(":[mfe]"),
    _zNumber: new RegExp(":[spi]"),

    ///// Nom et adjectif
    _zNA: new RegExp(":[NA]"),

    //// nombre
    _zNAs: new RegExp(":[NA].*:s"),
    _zNAp: new RegExp(":[NA].*:p"),
    _zNAi: new RegExp(":[NA].*:i"),
    _zNAsi: new RegExp(":[NA].*:[si]"),
    _zNApi: new RegExp(":[NA].*:[pi]"),

    //// genre
    _zNAm: new RegExp(":[NA].*:m"),
    _zNAf: new RegExp(":[NA].*:f"),
    _zNAe: new RegExp(":[NA].*:e"),
    _zNAme: new RegExp(":[NA].*:[me]"),
    _zNAfe: new RegExp(":[NA].*:[fe]"),

    //// nombre et genre
    // singulier
    _zNAms: new RegExp(":[NA].*:m.*:s"),
    _zNAfs: new RegExp(":[NA].*:f.*:s"),
    _zNAes: new RegExp(":[NA].*:e.*:s"),
    _zNAmes: new RegExp(":[NA].*:[me].*:s"),
    _zNAfes: new RegExp(":[NA].*:[fe].*:s"),

    // singulier et invariable
    _zNAmsi: new RegExp(":[NA].*:m.*:[si]"),
    _zNAfsi: new RegExp(":[NA].*:f.*:[si]"),
    _zNAesi: new RegExp(":[NA].*:e.*:[si]"),
    _zNAmesi: new RegExp(":[NA].*:[me].*:[si]"),
    _zNAfesi: new RegExp(":[NA].*:[fe].*:[si]"),

    // pluriel
    _zNAmp: new RegExp(":[NA].*:m.*:p"),
    _zNAfp: new RegExp(":[NA].*:f.*:p"),
    _zNAep: new RegExp(":[NA].*:e.*:p"),
    _zNAmep: new RegExp(":[NA].*:[me].*:p"),
    _zNAfep: new RegExp(":[NA].*:[fe].*:p"),

    // pluriel et invariable
    _zNAmpi: new RegExp(":[NA].*:m.*:[pi]"),
    _zNAfpi: new RegExp(":[NA].*:f.*:[pi]"),
    _zNAepi: new RegExp(":[NA].*:e.*:[pi]"),
    _zNAmepi: new RegExp(":[NA].*:[me].*:[pi]"),
    _zNAfepi: new RegExp(":[NA].*:[fe].*:[pi]"),

    //// Divers
    _zAD: new RegExp(":[AB]"),

    ///// Verbe
    _zVconj: new RegExp(":[123][sp]"),
    _zVconj123: new RegExp(":V[123].*:[123][sp]"),

    ///// Nom | Adjectif | Verbe
    _zNVconj: new RegExp(":(?:N|[123][sp])"),
    _zNAVconj: new RegExp(":(?:N|A|[123][sp])"),

    ///// Spécifique
    _zNnotA: new RegExp(":N(?!:A)"),
    _zPNnotA: new RegExp(":(?:N(?!:A)|Q)"),

    ///// Noms propres
    _zNP: new RegExp(":(?:M[12P]|T)"),
    _zNPm: new RegExp(":(?:M[12P]|T):m"),
    _zNPf: new RegExp(":(?:M[12P]|T):f"),
    _zNPe: new RegExp(":(?:M[12P]|T):e"),


    ///// FONCTIONS

    getLemmaOfMorph: function (sMorph) {
        return this._zLemma.exec(sMorph)[1];
    },

    checkAgreement: function (l1, l2) {
        // check number agreement
        if (!this.mbInv(l1) && !this.mbInv(l2)) {
            if (this.mbSg(l1) && !this.mbSg(l2)) {
                return false;
            }
            if (this.mbPl(l1) && !this.mbPl(l2)) {
                return false;
            }
        }
        // check gender agreement
        if (this.mbEpi(l1) || this.mbEpi(l2)) {
            return true;
        }
        if (this.mbMas(l1) && !this.mbMas(l2)) {
            return false;
        }
        if (this.mbFem(l1) && !this.mbFem(l2)) {
            return false;
        }
        return true;
    },

    checkConjVerb: function (lMorph, sReqConj) {
        return lMorph.some(s  =>  s.includes(sReqConj));
    },

    getGender: function (lMorph) {
        // returns gender of word (':m', ':f', ':e' or empty string).
        let sGender = "";
        for (let sMorph of lMorph) {
            let m = this._zGender.exec(sMorph);
            if (m) {
                if (!sGender) {
                    sGender = m[0];
                } else if (sGender != m[0]) {
                    return ":e";
                }
            }
        }
        return sGender;
    },

    getNumber: function (lMorph) {
        // returns number of word (':s', ':p', ':i' or empty string).
        let sNumber = "";
        for (let sMorph of lMorph) {
            let m = this._zNumber.exec(sMorph);
            if (m) {
                if (!sNumber) {
                    sNumber = m[0];
                } else if (sNumber != m[0]) {
                    return ":i";
                }
            }
        }
        return sNumber;
    },

    // NOTE :  isWhat (lMorph)    returns true   if lMorph contains nothing else than What
    //         mbWhat (lMorph)    returns true   if lMorph contains What at least once

    //// isXXX = it’s certain

    isNom: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":N"));
    },

    isNomNotAdj: function (lMorph) {
        return lMorph.every(s  =>  this._zNnotA.test(s));
    },

    isAdj: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":A"));
    },

    isNomAdj: function (lMorph) {
        return lMorph.every(s  =>  this._zNA.test(s));
    },

    isNomVconj: function (lMorph) {
        return lMorph.every(s  =>  this._zNVconj.test(s));
    },

    isInv: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":i"));
    },
    isSg: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":s"));
    },
    isPl: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":p"));
    },
    isEpi: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":e"));
    },
    isMas: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":m"));
    },
    isFem: function (lMorph) {
        return lMorph.every(s  =>  s.includes(":f"));
    },


    //// mbXXX = MAYBE XXX

    mbNom: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":N"));
    },

    mbAdj: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":A"));
    },

    mbAdjNb: function (lMorph) {
        return lMorph.some(s  =>  this._zAD.test(s));
    },

    mbNomAdj: function (lMorph) {
        return lMorph.some(s  =>  this._zNA.test(s));
    },

    mbNomNotAdj: function (lMorph) {
        let b = false;
        for (let s of lMorph) {
            if (s.includes(":A")) {
                return false;
            }
            if (s.includes(":N")) {
                b = true;
            }
        }
        return b;
    },

    mbPpasNomNotAdj: function (lMorph) {
        return lMorph.some(s  =>  this._zPNnotA.test(s));
    },

    mbVconj: function (lMorph) {
        return lMorph.some(s  =>  this._zVconj.test(s));
    },

    mbVconj123: function (lMorph) {
        return lMorph.some(s  =>  this._zVconj123.test(s));
    },

    mbMG: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":G"));
    },

    mbInv: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":i"));
    },
    mbSg: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":s"));
    },
    mbPl: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":p"));
    },
    mbEpi: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":e"));
    },
    mbMas: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":m"));
    },
    mbFem: function (lMorph) {
        return lMorph.some(s  =>  s.includes(":f"));
    },

    mbNpr: function (lMorph) {
        return lMorph.some(s  =>  this._zNP.test(s));
    },

    mbNprMasNotFem: function (lMorph) {
        if (lMorph.some(s  =>  this._zNPf.test(s))) {
            return false;
        }
        return lMorph.some(s  =>  this._zNPm.test(s));
    }
};
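// Usage sketch (illustrative; the morphology strings are assumed examples in the
// ":POS:gender:number" style matched by the regexes above):
//     cregex.getGender([">chat :N:m:s"]);                        // ":m"
//     cregex.getNumber([">chat :N:m:s", ">chat :N:m:p"]);        // ":i" (conflicting numbers)
//     cregex.checkAgreement([">chat :N:m:s"], [">noir :A:m:s"]); // true (same gender and number)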


if (typeof(exports) !== 'undefined') {
    exports._zLemma = cregex._zLemma;
    exports._zGender = cregex._zGender;
    exports._zNumber = cregex._zNumber;
    exports._zNA = cregex._zNA;
    exports._zNAs = cregex._zNAs;
    exports._zNAp = cregex._zNAp;
    exports._zNAi = cregex._zNAi;
    exports._zNAsi = cregex._zNAsi;
    exports._zNApi = cregex._zNApi;
    exports._zNAm = cregex._zNAm;
    exports._zNAf = cregex._zNAf;
    exports._zNAe = cregex._zNAe;
    exports._zNAme = cregex._zNAme;
    exports._zNAfe = cregex._zNAfe;
    exports._zNAms = cregex._zNAms;
    exports._zNAfs = cregex._zNAfs;
    exports._zNAes = cregex._zNAes;
    exports._zNAmes = cregex._zNAmes;
    exports._zNAfes = cregex._zNAfes;
    exports._zNAmsi = cregex._zNAmsi;
    exports._zNAfsi = cregex._zNAfsi;
    exports._zNAesi = cregex._zNAesi;
    exports._zNAmesi = cregex._zNAmesi;
    exports._zNAfesi = cregex._zNAfesi;
    exports._zNAmp = cregex._zNAmp;
    exports._zNAfp = cregex._zNAfp;
    exports._zNAep = cregex._zNAep;
    exports._zNAmep = cregex._zNAmep;
    exports._zNAfep = cregex._zNAfep;
    exports._zNAmpi = cregex._zNAmpi;
    exports._zNAfpi = cregex._zNAfpi;
    exports._zNAepi = cregex._zNAepi;
    exports._zNAmepi = cregex._zNAmepi;
    exports._zNAfepi = cregex._zNAfepi;
    exports._zAD = cregex._zAD;
    exports._zVconj = cregex._zVconj;
    exports._zVconj123 = cregex._zVconj123;
    exports._zNVconj = cregex._zNVconj;
    exports._zNAVconj = cregex._zNAVconj;
    exports._zNnotA = cregex._zNnotA;
    exports._zPNnotA = cregex._zPNnotA;
    exports._zNP = cregex._zNP;
    exports._zNPm = cregex._zNPm;
    exports._zNPf = cregex._zNPf;
    exports._zNPe = cregex._zNPe;
    exports.getLemmaOfMorph = cregex.getLemmaOfMorph;
    exports.checkAgreement = cregex.checkAgreement;
    exports.checkConjVerb = cregex.checkConjVerb;
    exports.getGender = cregex.getGender;
    exports.getNumber = cregex.getNumber;
    exports.isNom = cregex.isNom;
    exports.isNomNotAdj = cregex.isNomNotAdj;
    exports.isAdj = cregex.isAdj;
    exports.isNomAdj = cregex.isNomAdj;
    exports.isNomVconj = cregex.isNomVconj;
    exports.isInv = cregex.isInv;
    exports.isSg = cregex.isSg;
    exports.isPl = cregex.isPl;
    exports.isEpi = cregex.isEpi;
    exports.isMas = cregex.isMas;
    exports.isFem = cregex.isFem;
    exports.mbNom = cregex.mbNom;
    exports.mbAdj = cregex.mbAdj;
    exports.mbAdjNb = cregex.mbAdjNb;
    exports.mbNomAdj = cregex.mbNomAdj;
    exports.mbNomNotAdj = cregex.mbNomNotAdj;
    exports.mbPpasNomNotAdj = cregex.mbPpasNomNotAdj;
    exports.mbVconj = cregex.mbVconj;
    exports.mbVconj123 = cregex.mbVconj123;
    exports.mbMG = cregex.mbMG;
    exports.mbInv = cregex.mbInv;
    exports.mbSg = cregex.mbSg;
    exports.mbPl = cregex.mbPl;
    exports.mbEpi = cregex.mbEpi;
    exports.mbMas = cregex.mbMas;
    exports.mbFem = cregex.mbFem;
    exports.mbNpr = cregex.mbNpr;
    exports.mbNprMasNotFem = cregex.mbNprMasNotFem;
}

Modified gc_lang/fr/modules-js/gce_analyseur.js from [673094989d] to [0ad6fe5843].

        return "vous";
    }
    if (s2 == "eux") {
        return "ils";
    }
    if (s2 == "elle" || s2 == "elles") {
        // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
        if (cr.mbNprMasNotFem(_dAnalyses.gl_get(s1, ""))) {
            return "ils";
        }
        // si épicène, indéterminable, mais OSEF, le féminin l’emporte
        return "elles";
    }
    return s1 + " et " + s2;
}

function apposition (sWord1, sWord2) {
    // returns true if nom + nom (no agreement required)
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    return cr.mbNomNotAdj(_dAnalyses.gl_get(sWord2, "")) && cr.mbPpasNomNotAdj(_dAnalyses.gl_get(sWord1, ""));
}

function isAmbiguousNAV (sWord) {
    // words which are nom|adj and verb are ambiguous (except être and avoir)
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return false;
    }
    if (!cr.mbNomAdj(_dAnalyses.gl_get(sWord, "")) || sWord == "est") {
        return false;
    }
    if (cr.mbVconj(_dAnalyses.gl_get(sWord, "")) && !cr.mbMG(_dAnalyses.gl_get(sWord, ""))) {
        return true;
    }
    return false;
}

function isAmbiguousAndWrong (sWord1, sWord2, sReqMorphNA, sReqMorphConj) {
    //// use it if sWord1 won’t be a verb; word2 is assumed to be true via isAmbiguousNAV
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let a2 = _dAnalyses.gl_get(sWord2, null);
    if (!a2 || a2.length === 0) {
        return false;
    }
    if (cr.checkConjVerb(a2, sReqMorphConj)) {
        // verb word2 is ok
        return false;
    }
    let a1 = _dAnalyses.gl_get(sWord1, null);
    if (!a1 || a1.length === 0) {
        return false;
    }
    if (cr.checkAgreement(a1, a2) && (cr.mbAdj(a2) || cr.mbAdj(a1))) {
        return false;
    }
    return true;
}

function isVeryAmbiguousAndWrong (sWord1, sWord2, sReqMorphNA, sReqMorphConj, bLastHopeCond) {
    //// use it if sWord1 can be also a verb; word2 is assumed to be true via isAmbiguousNAV
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let a2 = _dAnalyses.gl_get(sWord2, null)
    if (!a2 || a2.length === 0) {
        return false;
    }
    if (cr.checkConjVerb(a2, sReqMorphConj)) {
        // verb word2 is ok
        return false;
    }
    let a1 = _dAnalyses.gl_get(sWord1, null);
    if (!a1 || a1.length === 0) {
        return false;
    }
    if (cr.checkAgreement(a1, a2) && (cr.mbAdj(a2) || cr.mbAdjNb(a1))) {
        return false;
    }
    // now, we know there is no agreement, and the conjugation is also wrong
    if (cr.isNomAdj(a1)) {
        return true;
    }
    //if cr.isNomAdjVerb(a1): # considered true
    if (bLastHopeCond) {
        return true;
    }
    return false;
}

function checkAgreement (sWord1, sWord2) {
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let a2 = _dAnalyses.gl_get(sWord2, null)
    if (!a2 || a2.length === 0) {
        return true;
    }
    let a1 = _dAnalyses.gl_get(sWord1, null);
    if (!a1 || a1.length === 0) {
        return true;
    }
    return cr.checkAgreement(a1, a2);
}

function mbUnit (s) {
    if (/[µ\/⁰¹²³⁴⁵⁶⁷⁸⁹Ωℓ·]/.test(s)) {
        return true;
    }
    if (s.length > 1 && s.length < 16 && s.slice(0, 1).gl_isLowerCase() && (!s.slice(1).gl_isLowerCase() || /[0-9]/.test(s))) {

(new version, lines 18-121)

        return "vous";
    }
    if (s2 == "eux") {
        return "ils";
    }
    if (s2 == "elle" || s2 == "elles") {
        // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
        if (cregex.mbNprMasNotFem(_dAnalyses.gl_get(s1, ""))) {
            return "ils";
        }
        // si épicène, indéterminable, mais OSEF, le féminin l’emporte
        return "elles";
    }
    return s1 + " et " + s2;
}

function apposition (sWord1, sWord2) {
    // returns true if nom + nom (no agreement required)
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    return cregex.mbNomNotAdj(_dAnalyses.gl_get(sWord2, "")) && cregex.mbPpasNomNotAdj(_dAnalyses.gl_get(sWord1, ""));
}

function isAmbiguousNAV (sWord) {
    // words which are nom|adj and verb are ambiguous (except être and avoir)
    if (!_dAnalyses.has(sWord) && !_storeMorphFromFSA(sWord)) {
        return false;
    }
    if (!cregex.mbNomAdj(_dAnalyses.gl_get(sWord, "")) || sWord == "est") {
        return false;
    }
    if (cregex.mbVconj(_dAnalyses.gl_get(sWord, "")) && !cregex.mbMG(_dAnalyses.gl_get(sWord, ""))) {
        return true;
    }
    return false;
}

function isAmbiguousAndWrong (sWord1, sWord2, sReqMorphNA, sReqMorphConj) {
    //// use it if sWord1 won’t be a verb; word2 is assumed to be true via isAmbiguousNAV
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let a2 = _dAnalyses.gl_get(sWord2, null);
    if (!a2 || a2.length === 0) {
        return false;
    }
    if (cregex.checkConjVerb(a2, sReqMorphConj)) {
        // verb word2 is ok
        return false;
    }
    let a1 = _dAnalyses.gl_get(sWord1, null);
    if (!a1 || a1.length === 0) {
        return false;
    }
    if (cregex.checkAgreement(a1, a2) && (cregex.mbAdj(a2) || cregex.mbAdj(a1))) {
        return false;
    }
    return true;
}

function isVeryAmbiguousAndWrong (sWord1, sWord2, sReqMorphNA, sReqMorphConj, bLastHopeCond) {
    //// use it if sWord1 can be also a verb; word2 is assumed to be true via isAmbiguousNAV
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let a2 = _dAnalyses.gl_get(sWord2, null);
    if (!a2 || a2.length === 0) {
        return false;
    }
    if (cregex.checkConjVerb(a2, sReqMorphConj)) {
        // verb word2 is ok
        return false;
    }
    let a1 = _dAnalyses.gl_get(sWord1, null);
    if (!a1 || a1.length === 0) {
        return false;
    }
    if (cregex.checkAgreement(a1, a2) && (cregex.mbAdj(a2) || cregex.mbAdjNb(a1))) {
        return false;
    }
    // now, we know there is no agreement, and the conjugation is also wrong
    if (cregex.isNomAdj(a1)) {
        return true;
    }
    //if cregex.isNomAdjVerb(a1): # considered true
    if (bLastHopeCond) {
        return true;
    }
    return false;
}

function checkAgreement (sWord1, sWord2) {
    // We don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let a2 = _dAnalyses.gl_get(sWord2, null);
    if (!a2 || a2.length === 0) {
        return true;
    }
    let a1 = _dAnalyses.gl_get(sWord1, null);
    if (!a1 || a1.length === 0) {
        return true;
    }
    return cregex.checkAgreement(a1, a2);
}

function mbUnit (s) {
    if (/[µ\/⁰¹²³⁴⁵⁶⁷⁸⁹Ωℓ·]/.test(s)) {
        return true;
    }
    if (s.length > 1 && s.length < 16 && s.slice(0, 1).gl_isLowerCase() && (!s.slice(1).gl_isLowerCase() || /[0-9]/.test(s))) {

Modified gc_lang/fr/modules-js/gce_suggestions.js from [d46530bfae] to [db0ba9f8a1].

(old version, lines 1-28)

//// GRAMMAR CHECKING ENGINE PLUGIN: Suggestion mechanisms


const conj = require("resource://grammalecte/fr/conj.js");
const mfsp = require("resource://grammalecte/fr/mfsp.js");
const phonet = require("resource://grammalecte/fr/phonet.js");



//// verbs

function suggVerb (sFlex, sWho, funcSugg2=null) {
    // we don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let aSugg = new Set();
    for (let sStem of stem(sFlex)) {
        let tTags = conj._getTags(sStem);
        if (tTags) {
            // we get the tense
            let aTense = new Set();
            for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
                let m;
                let zVerb = new RegExp (sStem+" .*?(:(?:Y|I[pqsf]|S[pq]|K))", "g");
                while (m = zVerb.exec(sMorph)) {
                    // stem must be used in regex to prevent confusion between different verbs (e.g. sauras has 2 stems: savoir and saurer)
                    if (m) {
                        if (m[1] === ":Y") {
                            aTense.add(":Ip");
                            aTense.add(":Iq");
                            aTense.add(":Is");
                        } else if (m[1] === ":P") {

(new version, lines 1-30)

//// GRAMMAR CHECKING ENGINE PLUGIN: Suggestion mechanisms

if (typeof(exports) !== 'undefined') {
    var conj = require("resource://grammalecte/fr/conj.js");
    var mfsp = require("resource://grammalecte/fr/mfsp.js");
    var phonet = require("resource://grammalecte/fr/phonet.js");
}


//// verbs

function suggVerb (sFlex, sWho, funcSugg2=null) {
    // we don’t check if word exists in _dAnalyses, for it is assumed it has been done before
    let aSugg = new Set();
    for (let sStem of stem(sFlex)) {
        let tTags = conj._getTags(sStem);
        if (tTags) {
            // we get the tense
            let aTense = new Set();
            for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
                let m;
                let zVerb = new RegExp (">"+sStem+" .*?(:(?:Y|I[pqsf]|S[pq]|K))", "g");
                while ((m = zVerb.exec(sMorph)) !== null) {
                    // stem must be used in regex to prevent confusion between different verbs (e.g. sauras has 2 stems: savoir and saurer)
                    if (m) {
                        if (m[1] === ":Y") {
                            aTense.add(":Ip");
                            aTense.add(":Iq");
                            aTense.add(":Is");
                        } else if (m[1] === ":P") {
(old version, lines 137-152)

    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

function suggVerbInfi (sFlex) {
    //return stem(sFlex).join("|");
    return [ for (sStem of stem(sFlex)) if (conj.isVerb(sStem)) sStem ].join("|");
}


const _dQuiEst = new Map ([
    ["je", ":1s"], ["j’", ":1s"], ["j’en", ":1s"], ["j’y", ":1s"],
    ["tu", ":2s"], ["il", ":3s"], ["on", ":3s"], ["elle", ":3s"],
    ["nous", ":1p"], ["vous", ":2p"], ["ils", ":3p"], ["elles", ":3p"]

(new version, lines 139-153)

    if (aSugg.size > 0) {
        return Array.from(aSugg).join("|");
    }
    return "";
}

function suggVerbInfi (sFlex) {
    return stem(sFlex).filter(sStem => conj.isVerb(sStem)).join("|");

}


const _dQuiEst = new Map ([
    ["je", ":1s"], ["j’", ":1s"], ["j’en", ":1s"], ["j’y", ":1s"],
    ["tu", ":2s"], ["il", ":3s"], ["on", ":3s"], ["elle", ":3s"],
    ["nous", ":1p"], ["vous", ":2p"], ["ils", ":3p"], ["elles", ":3p"]
(old version, lines 193-207)


function suggPlur (sFlex, sWordToAgree=null) {
    // returns plural forms assuming sFlex is singular
    if (sWordToAgree) {
        if (!_dAnalyses.has(sWordToAgree) && !_storeMorphFromFSA(sWordToAgree)) {
            return "";
        }
        let sGender = cr.getGender(_dAnalyses.gl_get(sWordToAgree, []));
        if (sGender == ":m") {
            return suggMasPlur(sFlex);
        } else if (sGender == ":f") {
            return suggFemPlur(sFlex);
        }
    }
    let aSugg = new Set();

(new version, lines 194-208)


function suggPlur (sFlex, sWordToAgree=null) {
    // returns plural forms assuming sFlex is singular
    if (sWordToAgree) {
        if (!_dAnalyses.has(sWordToAgree) && !_storeMorphFromFSA(sWordToAgree)) {
            return "";
        }
        let sGender = cregex.getGender(_dAnalyses.gl_get(sWordToAgree, []));
        if (sGender == ":m") {
            return suggMasPlur(sFlex);
        } else if (sGender == ":f") {
            return suggFemPlur(sFlex);
        }
    }
    let aSugg = new Set();
(old version, lines 259-280)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":m") || sMorph.includes(":e")) {
                aSugg.add(suggSing(sFlex));
            } else {
                let sStem = cr.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    mfsp.getMasForm(sStem, false).forEach(function(x) { aSugg.add(x); });
                }
            }
        } else {
            // a verb
            let sVerb = cr.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q1") && conj.hasConj(sVerb, ":PQ", ":Q3")) {
                // We also check if the verb has a feminine form.
                // If not, we consider it’s better to not suggest the masculine one, as it can be considered invariable.
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q1"));
            }
        }
    }

(new version, lines 260-281)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":m") || sMorph.includes(":e")) {
                aSugg.add(suggSing(sFlex));
            } else {
                let sStem = cregex.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    mfsp.getMasForm(sStem, false).forEach(function(x) { aSugg.add(x); });
                }
            }
        } else {
            // a verb
            let sVerb = cregex.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q1") && conj.hasConj(sVerb, ":PQ", ":Q3")) {
                // We also check if the verb has a feminine form.
                // If not, we consider it’s better to not suggest the masculine one, as it can be considered invariable.
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q1"));
            }
        }
    }
(old version, lines 295-316)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":m") || sMorph.includes(":e")) {
                aSugg.add(suggPlur(sFlex));
            } else {
                let sStem = cr.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    mfsp.getMasForm(sStem, true).forEach(function(x) { aSugg.add(x); });
                }
            }
        } else {
            // a verb
            let sVerb = cr.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q2")) {
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q2"));
            } else if (conj.hasConj(sVerb, ":PQ", ":Q1")) {
                let sSugg = conj.getConj(sVerb, ":PQ", ":Q1");
                // it is necessary to filter these flexions, like “succédé” or “agi” that are not masculine plural
                if (sSugg.endsWith("s")) {
                    aSugg.add(sSugg);

(new version, lines 296-317)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":m") || sMorph.includes(":e")) {
                aSugg.add(suggPlur(sFlex));
            } else {
                let sStem = cregex.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    mfsp.getMasForm(sStem, true).forEach(function(x) { aSugg.add(x); });
                }
            }
        } else {
            // a verb
            let sVerb = cregex.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q2")) {
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q2"));
            } else if (conj.hasConj(sVerb, ":PQ", ":Q1")) {
                let sSugg = conj.getConj(sVerb, ":PQ", ":Q1");
                // it is necessary to filter these flexions, like “succédé” or “agi” that are not masculine plural
                if (sSugg.endsWith("s")) {
                    aSugg.add(sSugg);
(old version, lines 336-357)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":f") || sMorph.includes(":e")) {
                aSugg.add(suggSing(sFlex));
            } else {
                let sStem = cr.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    aSugg.add(sStem);
                }
            }
        } else {
            // a verb
            let sVerb = cr.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q3")) {
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q3"));
            }
        }
    }
    if (bSuggSimil) {
        for (let e of phonet.selectSimil(sFlex, ":f:[si]")) {

(new version, lines 337-358)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":f") || sMorph.includes(":e")) {
                aSugg.add(suggSing(sFlex));
            } else {
                let sStem = cregex.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    aSugg.add(sStem);
                }
            }
        } else {
            // a verb
            let sVerb = cregex.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q3")) {
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q3"));
            }
        }
    }
    if (bSuggSimil) {
        for (let e of phonet.selectSimil(sFlex, ":f:[si]")) {
(old version, lines 370-391)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":f") || sMorph.includes(":e")) {
                aSugg.add(suggPlur(sFlex));
            } else {
                let sStem = cr.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    aSugg.add(sStem+"s");
                }
            }
        } else {
            // a verb
            let sVerb = cr.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q4")) {
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q4"));
            }
        }
    }
    if (bSuggSimil) {
        for (let e of phonet.selectSimil(sFlex, ":f:[pi]")) {

(new version, lines 371-392)

    let aSugg = new Set();
    for (let sMorph of _dAnalyses.gl_get(sFlex, [])) {
        if (!sMorph.includes(":V")) {
            // not a verb
            if (sMorph.includes(":f") || sMorph.includes(":e")) {
                aSugg.add(suggPlur(sFlex));
            } else {
                let sStem = cregex.getLemmaOfMorph(sMorph);
                if (mfsp.isFemForm(sStem)) {
                    aSugg.add(sStem+"s");
                }
            }
        } else {
            // a verb
            let sVerb = cregex.getLemmaOfMorph(sMorph);
            if (conj.hasConj(sVerb, ":PQ", ":Q4")) {
                aSugg.add(conj.getConj(sVerb, ":PQ", ":Q4"));
            }
        }
    }
    if (bSuggSimil) {
        for (let e of phonet.selectSimil(sFlex, ":f:[pi]")) {

Modified gc_lang/fr/modules-js/lexicographe.js from [5e8c9037a7] to [a348883011].

(old version, lines 1-19)

// Grammalecte - Lexicographe
// License: MPL 2

"use strict";

${string}
${map}



const helpers = require("resource://grammalecte/helpers.js");
const tkz = require("resource://grammalecte/tokenizer.js");


const _dTAGS = new Map ([
    [':G', "[mot grammatical]"],
    [':N', " nom,"],
    [':A', " adjectif,"],
    [':M1', " prénom,"],
    [':M2', " patronyme,"],

(new version, lines 1-19)

// Grammalecte - Lexicographe
// License: MPL 2

"use strict";

${string}
${map}


if (typeof(exports) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");

}

const _dTAGS = new Map ([
    [':G', "[mot grammatical]"],
    [':N', " nom,"],
    [':A', " adjectif,"],
    [':M1', " prénom,"],
    [':M2', " patronyme,"],
(old version, lines 195-209)

class Lexicographe {

    constructor (oDict) {
        this.oDict = oDict;
        this._zElidedPrefix = new RegExp ("^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)", "i");
        this._zCompoundWord = new RegExp ("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$", "i");
        this._zTag = new RegExp ("[:;/][a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ*][^:;/]*", "g");
    };


    getInfoForToken (oToken) {
        // Token: .sType, .sValue, .nStart, .nEnd
        // return a list [type, token_string, values]
        let m = null;
        try {
            switch (oToken.sType) {

(new version, lines 195-209)

class Lexicographe {

    constructor (oDict) {
        this.oDict = oDict;
        this._zElidedPrefix = new RegExp ("^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)", "i");
        this._zCompoundWord = new RegExp ("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$", "i");
        this._zTag = new RegExp ("[:;/][a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ*][^:;/]*", "g");

    }

    getInfoForToken (oToken) {
        // Token: .sType, .sValue, .nStart, .nEnd
        // return a list [type, token_string, values]
        let m = null;
        try {
            switch (oToken.sType) {
(old version, lines 222-290)

                    break;
                case 'WORD': 
                    if (oToken.sValue.gl_count("-") > 4) {
                        return { sType: "COMPLEX", sValue: oToken.sValue, aLabel: ["élément complexe indéterminé"] };
                    }
                    else if (this.oDict.isValidToken(oToken.sValue)) {
                        let lMorph = this.oDict.getMorph(oToken.sValue);
                        let aElem = [ for (s of lMorph) if (s.includes(":")) this._formatTags(s) ];



                        return { sType: oToken.sType, sValue: oToken.sValue, aLabel: aElem};
                    }
                    else if (m = this._zCompoundWord.exec(oToken.sValue)) {
                        // mots composés
                        let lMorph = this.oDict.getMorph(m[1]);
                        let aElem = [ for (s of lMorph) if (s.includes(":")) this._formatTags(s) ];



                        aElem.push("-" + m[2] + ": " + this._formatSuffix(m[2].toLowerCase()));
                        return { sType: oToken.sType, sValue: oToken.sValue, aLabel: aElem };
                    }
                    else {
                        return { sType: "UNKNOWN", sValue: oToken.sValue, aLabel: ["inconnu du dictionnaire"] };
                    }
                    break;
            }
        }
        catch (e) {
            helpers.logerror(e);
        }
        return null;
    };


    _formatTags (sTags) {
        let sRes = "";
        sTags = sTags.replace(/V([0-3][ea]?)[itpqnmr_eaxz]+/, "V$1");
        let m;
        while ((m = this._zTag.exec(sTags)) !== null) {
            sRes += _dTAGS.get(m[0]);
            if (sRes.length > 100) {
                break;
            }
        }
        if (sRes.startsWith(" verbe") && !sRes.endsWith("infinitif")) {
            sRes += " [" + sTags.slice(1, sTags.indexOf(" ")) + "]";
        }
        if (!sRes) {
            sRes = "#Erreur. Étiquette inconnue : [" + sTags + "]";
            helpers.echo(sRes);
            return sRes;
        }
        return sRes.gl_trimRight(",");
    };


    _formatSuffix (s) {
        if (s.startsWith("t-")) {
            return "“t” euphonique +" + _dAD.get(s.slice(2));
        }
        if (!s.includes("-")) {
            return _dAD.get(s.replace("’", "'"));
        }
        if (s.endsWith("ous")) {
            s += '2';
        }
        let nPos = s.indexOf("-");
        return _dAD.get(s.slice(0, nPos)) + " +" + _dAD.get(s.slice(nPos+1));
    };

}


if (typeof(exports) !== 'undefined') {
    exports.Lexicographe = Lexicographe;
}

(new version, lines 222-296)

                    break;
                case 'WORD': 
                    if (oToken.sValue.gl_count("-") > 4) {
                        return { sType: "COMPLEX", sValue: oToken.sValue, aLabel: ["élément complexe indéterminé"] };
                    }
                    else if (this.oDict.isValidToken(oToken.sValue)) {
                        let lMorph = this.oDict.getMorph(oToken.sValue);
                        let aElem = [];
                        for (let s of lMorph){
                            if (s.includes(":"))  aElem.push( this._formatTags(s) );
                        }
                        return { sType: oToken.sType, sValue: oToken.sValue, aLabel: aElem};
                    }
                    else if (m = this._zCompoundWord.exec(oToken.sValue)) {
                        // mots composés
                        let lMorph = this.oDict.getMorph(m[1]);
                        let aElem = [];
                        for (let s of lMorph){
                            if (s.includes(":"))  aElem.push( this._formatTags(s) );
                        }
                        aElem.push("-" + m[2] + ": " + this._formatSuffix(m[2].toLowerCase()));
                        return { sType: oToken.sType, sValue: oToken.sValue, aLabel: aElem };
                    }
                    else {
                        return { sType: "UNKNOWN", sValue: oToken.sValue, aLabel: ["inconnu du dictionnaire"] };
                    }
                    break;
            }
        }
        catch (e) {
            helpers.logerror(e);
        }
        return null;

    }

    _formatTags (sTags) {
        let sRes = "";
        sTags = sTags.replace(/V([0-3][ea]?)[itpqnmr_eaxz]+/, "V$1");
        let m;
        while ((m = this._zTag.exec(sTags)) !== null) {
            sRes += _dTAGS.get(m[0]);
            if (sRes.length > 100) {
                break;
            }
        }
        if (sRes.startsWith(" verbe") && !sRes.endsWith("infinitif")) {
            sRes += " [" + sTags.slice(1, sTags.indexOf(" ")) + "]";
        }
        if (!sRes) {
            sRes = "#Erreur. Étiquette inconnue : [" + sTags + "]";
            helpers.echo(sRes);
            return sRes;
        }
        return sRes.gl_trimRight(",");

    }

    _formatSuffix (s) {
        if (s.startsWith("t-")) {
            return "“t” euphonique +" + _dAD.get(s.slice(2));
        }
        if (!s.includes("-")) {
            return _dAD.get(s.replace("’", "'"));
        }
        if (s.endsWith("ous")) {
            s += '2';
        }
        let nPos = s.indexOf("-");
        return _dAD.get(s.slice(0, nPos)) + " +" + _dAD.get(s.slice(nPos+1));

    }
}


if (typeof(exports) !== 'undefined') {
    exports.Lexicographe = Lexicographe;
}
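
Note on the rewrite above: the old getInfoForToken() built aElem with an array comprehension, a non-standard SpiderMonkey-only construct, and the new code replaces it with a plain loop. The same transformation can also be written with standard filter()/map(); the sketch below is a generic illustration only, with hypothetical names, not project code:

// Generic sketch of the rewrite: the non-standard form
//   [ for (s of lMorph) if (s.includes(":")) formatTags(s) ]
// becomes standard ECMAScript:
function collectTags (lMorph, formatTags) {
    // keep only the analyses that carry tags, then format each of them
    return lMorph.filter(s => s.includes(":")).map(s => formatTags(s));
}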

Modified gc_lang/fr/modules-js/mfsp.js from [404b1b695d] to [5ce8b4f9f6].

(old version, lines 1-87)

// Grammalecte

"use strict";



const helpers = require("resource://grammalecte/helpers.js");
const echo = helpers.echo;

const oData = JSON.parse(helpers.loadFile("resource://grammalecte/fr/mfsp_data.json"));


// list of affix codes
const _lTagMiscPlur = oData.lTagMiscPlur;
const _lTagMasForm = oData.lTagMasForm;

// dictionary of words with uncommon plurals (-x, -ux, english, latin and italian plurals) and tags to generate them
const _dMiscPlur = helpers.objectToMap(oData.dMiscPlur);

// dictionary of feminine forms and tags to generate masculine forms (singular and plural)








const _dMasForm = helpers.objectToMap(oData.dMasForm);






function isFemForm (sWord) {
    // returns True if sWord exists in _dMasForm
    return _dMasForm.has(sWord);
}

function getMasForm (sWord, bPlur) {
    // returns masculine form with feminine form
    if (_dMasForm.has(sWord)) {


        return [ for (sTag of _whatSuffixCodes(sWord, bPlur))  _modifyStringWithSuffixCode(sWord, sTag) ];
    }


    return [];
}

function hasMiscPlural (sWord) {
    // returns True if sWord exists in dMiscPlur
    return _dMiscPlur.has(sWord);
}

function getMiscPlural (sWord) {
    // returns plural form with singular form
    if (_dMiscPlur.has(sWord)) {


        return [ for (sTag of _lTagMiscPlur[_dMiscPlur.get(sWord)].split("|"))  _modifyStringWithSuffixCode(sWord, sTag) ];
    }


    return [];
}

function _whatSuffixCodes (sWord, bPlur) {
    // necessary only for dMasFW
    let sSfx = _lTagMasForm[_dMasForm.get(sWord)];
    if (sSfx.includes("/")) {
        if (bPlur) {
            return sSfx.slice(sSfx.indexOf("/")+1).split("|");
        }
        return sSfx.slice(0, sSfx.indexOf("/")).split("|");
    }
    return sSfx.split("|");
}

function _modifyStringWithSuffixCode (sWord, sSfx) {
    // returns sWord modified by sSfx
    if (!sWord) {
        return "";
    }
    if (sSfx === "0") {
        return sWord;
    }
    try {
        if (sSfx[0] !== '0') {
            return sWord.slice(0, -(sSfx.charCodeAt(0)-48)) + sSfx.slice(1); // 48 is the ASCII code for "0"
        } else {
            return sWord + sSfx.slice(1);
        }
    }
    catch (e) {
        console.log(e);
        return "## erreur, code : " + sSfx + " ##";
    }













}


if (typeof(exports) !== 'undefined') {





    exports.isFemForm = isFemForm;
    exports.getMasForm = getMasForm;
    exports.hasMiscPlural = hasMiscPlural;
    exports.getMiscPlural = getMiscPlural;


}

(new version, lines 1-126)

// Grammalecte

"use strict";


if (typeof(require) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");

}


var mfsp = {
    // list of affix codes
    _lTagMiscPlur: [],
    _lTagMasForm: [],

    // dictionary of words with uncommon plurals (-x, -ux, english, latin and italian plurals) and tags to generate them

    _dMiscPlur: new Map(),
    // dictionary of feminine forms and tags to generate masculine forms (singular and plural)
    _dMasForm: new Map(),

    init: function (sJSONData) {
        try {
            let _oData = JSON.parse(sJSONData);
            this._lTagMiscPlur = _oData.lTagMiscPlur;
            this._lTagMasForm = _oData.lTagMasForm;
            this._dMiscPlur = helpers.objectToMap(_oData.dMiscPlur);
            this._dMasForm = helpers.objectToMap(_oData.dMasForm);
        }
        catch (e) {
            console.error(e);
        }
    },

    isFemForm: function (sWord) {
        // returns True if sWord exists in this._dMasForm
        return this._dMasForm.has(sWord);
    },

    getMasForm: function (sWord, bPlur) {
        // returns masculine form with feminine form
        if (this._dMasForm.has(sWord)) {
            let aMasForm = [];
            for (let sTag of this._whatSuffixCode(sWord, bPlur)){
                aMasForm.push( this._modifyStringWithSuffixCode(sWord, sTag) );
            }
            return aMasForm;
        }
        return [];
    },

    hasMiscPlural: function (sWord) {
        // returns True if sWord exists in dMiscPlur
        return this._dMiscPlur.has(sWord);
    },

    getMiscPlural: function (sWord) {
        // returns plural form with singular form
        if (this._dMiscPlur.has(sWord)) {
            let aMiscPlural = [];
            for (let sTag of this._lTagMiscPlur[this._dMiscPlur.get(sWord)].split("|")){
                aMiscPlural.push( this._modifyStringWithSuffixCode(sWord, sTag) );
            }
            return aMiscPlural;
        }
        return [];
    },

    _whatSuffixCode: function (sWord, bPlur) {
        // necessary only for dMasFW
        let sSfx = this._lTagMasForm[this._dMasForm.get(sWord)];
        if (sSfx.includes("/")) {
            if (bPlur) {
                return sSfx.slice(sSfx.indexOf("/")+1).split("|");
            }
            return sSfx.slice(0, sSfx.indexOf("/")).split("|");
        }
        return sSfx.split("|");
    },

    _modifyStringWithSuffixCode: function (sWord, sSfx) {
        // returns sWord modified by sSfx
        if (!sWord) {
            return "";
        }
        if (sSfx === "0") {
            return sWord;
        }
        try {
            if (sSfx[0] !== '0') {
                return sWord.slice(0, -(sSfx.charCodeAt(0)-48)) + sSfx.slice(1); // 48 is the ASCII code for "0"
            } else {
                return sWord + sSfx.slice(1);
            }
        }
        catch (e) {
            console.log(e);
            return "## erreur, code : " + sSfx + " ##";
        }
    }
};


// Initialization
if (typeof(browser) !== 'undefined') {
    // WebExtension
    mfsp.init(helpers.loadFile(browser.extension.getURL("grammalecte/fr/mfsp_data.json")));
} else if (typeof(require) !== 'undefined') {
    // Add-on SDK and Thunderbird
    mfsp.init(helpers.loadFile("resource://grammalecte/fr/mfsp_data.json"));
} else {
    console.log("Module mfsp non initialisé");
}


if (typeof(exports) !== 'undefined') {
    exports._lTagMiscPlur = mfsp._lTagMiscPlur;
    exports._lTagMasForm = mfsp._lTagMasForm;
    exports._dMiscPlur = mfsp._dMiscPlur;
    exports._dMasForm = mfsp._dMasForm;
    exports.init = mfsp.init;
    exports.isFemForm = mfsp.isFemForm;
    exports.getMasForm = mfsp.getMasForm;
    exports.hasMiscPlural = mfsp.hasMiscPlural;
    exports.getMiscPlural = mfsp.getMiscPlural;
    exports._whatSuffixCode = mfsp._whatSuffixCode;
    exports._modifyStringWithSuffixCode = mfsp._modifyStringWithSuffixCode;
}
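
The module is now an object whose data is injected through init() at load time, as handled by the initialization block just above. A minimal usage sketch, assuming sJSONData already holds the content of mfsp_data.json and using an illustrative word that may or may not be present in the data:

// Sketch only: initialize, then query the masculine forms of a feminine entry.
mfsp.init(sJSONData);                               // parses the JSON and fills the maps
if (mfsp.isFemForm("chanteuse")) {                  // "chanteuse" is just an example word
    let lMas = mfsp.getMasForm("chanteuse", false); // false = singular form(s)
    console.log(lMas.join("|"));
}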

Modified gc_lang/fr/modules-js/phonet.js from [1d899a6d9f] to [742b1a34db].

(old version, lines 1-75)

// Grammalecte - Suggestion phonétique


const helpers = require("resource://grammalecte/helpers.js");

const echo = helpers.echo;

const oData = JSON.parse(helpers.loadFile("resource://grammalecte/fr/phonet_data.json"));







const _dWord = helpers.objectToMap(oData.dWord);
const _lSet = oData.lSet;
const _dMorph = helpers.objectToMap(oData.dMorph);






function hasSimil (sWord, sPattern=null) {
    // return True if there is list of words phonetically similar to sWord
    if (!sWord) {
        return false;
    }
    if (_dWord.has(sWord)) {
        if (sPattern) {
            return getSimil(sWord).some(sSimil => _dMorph.gl_get(sSimil, []).some(sMorph => sMorph.search(sPattern) >= 0));
        }
        return true;
    }
    if (sWord.slice(0,1).gl_isUpperCase()) {
        sWord = sWord.toLowerCase();
        if (_dWord.has(sWord)) {
            if (sPattern) {
                return getSimil(sWord).some(sSimil => _dMorph.gl_get(sSimil, []).some(sMorph => sMorph.search(sPattern) >= 0));
            }
            return true;
        }
    }
    return false;
}

function getSimil (sWord) {
    // return list of words phonetically similar to sWord
    if (!sWord) {
        return [];
    }
    if (_dWord.has(sWord)) {
        return _lSet[_dWord.get(sWord)];
    }
    if (sWord.slice(0,1).gl_isUpperCase()) {
        sWord = sWord.toLowerCase();
        if (_dWord.has(sWord)) {
            return _lSet[_dWord.get(sWord)];
        }
    }
    return [];
}

function selectSimil (sWord, sPattern) {
    // return list of words phonetically similar to sWord and whose POS matches sPattern
    if (!sPattern) {
        return new Set(getSimil(sWord));
    }
    let aSelect = new Set();
    for (let sSimil of getSimil(sWord)) {
        for (let sMorph of _dMorph.gl_get(sSimil, [])) {
            if (sMorph.search(sPattern) >= 0) {
                aSelect.add(sSimil);
            }
        }
    }
    return aSelect;













}


if (typeof(exports) !== 'undefined') {




    exports.hasSimil = hasSimil;
    exports.getSimil = getSimil;
    exports.selectSimil = selectSimil;
}

(new version, lines 1-103)

// Grammalecte - Suggestion phonétique

if (typeof(require) !== 'undefined') {
    var helpers = require("resource://grammalecte/helpers.js");
}


var phonet = {
    _dWord: new Map(),
    _lSet: [],
    _dMorph: new Map(),

    init: function (sJSONData) {
        try {
            let _oData = JSON.parse(sJSONData);
            this._dWord = helpers.objectToMap(_oData.dWord);
            this._lSet = _oData.lSet;
            this._dMorph = helpers.objectToMap(_oData.dMorph);
        }
        catch (e) {
            console.error(e);
        }
    },

    hasSimil: function (sWord, sPattern=null) {
        // return True if there is list of words phonetically similar to sWord
        if (!sWord) {
            return false;
        }
        if (this._dWord.has(sWord)) {
            if (sPattern) {
                return this.getSimil(sWord).some(sSimil => this._dMorph.gl_get(sSimil, []).some(sMorph => sMorph.search(sPattern) >= 0));
            }
            return true;
        }
        if (sWord.slice(0,1).gl_isUpperCase()) {
            sWord = sWord.toLowerCase();
            if (this._dWord.has(sWord)) {
                if (sPattern) {
                    return this.getSimil(sWord).some(sSimil => this._dMorph.gl_get(sSimil, []).some(sMorph => sMorph.search(sPattern) >= 0));
                }
                return true;
            }
        }
        return false;
    },

    getSimil: function (sWord) {
        // return list of words phonetically similar to sWord
        if (!sWord) {
            return [];
        }
        if (this._dWord.has(sWord)) {
            return this._lSet[this._dWord.get(sWord)];
        }
        if (sWord.slice(0,1).gl_isUpperCase()) {
            sWord = sWord.toLowerCase();
            if (this._dWord.has(sWord)) {
                return this._lSet[this._dWord.get(sWord)];
            }
        }
        return [];
    },

    selectSimil: function (sWord, sPattern) {
        // return list of words phonetically similar to sWord and whose POS matches sPattern
        if (!sPattern) {
            return new Set(this.getSimil(sWord));
        }
        let aSelect = new Set();
        for (let sSimil of this.getSimil(sWord)) {
            for (let sMorph of this._dMorph.gl_get(sSimil, [])) {
                if (sMorph.search(sPattern) >= 0) {
                    aSelect.add(sSimil);
                }
            }
        }
        return aSelect;
    }
};


// Initialization
if (typeof(browser) !== 'undefined') {
    // WebExtension
    phonet.init(helpers.loadFile(browser.extension.getURL("grammalecte/fr/phonet_data.json")));
} else if (typeof(require) !== 'undefined') {
    // Add-on SDK and Thunderbird
    phonet.init(helpers.loadFile("resource://grammalecte/fr/phonet_data.json"));
} else {
    console.log("Module phonet non initialisé");
}


if (typeof(exports) !== 'undefined') {
    exports._dWord = phonet._dWord;
    exports._lSet = phonet._lSet;
    exports._dMorph = phonet._dMorph;
    exports.init = phonet.init;
    exports.hasSimil = phonet.hasSimil;
    exports.getSimil = phonet.getSimil;
    exports.selectSimil = phonet.selectSimil;
}
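
phonet follows the same pattern. Once init() has been fed phonet_data.json, the lookups can be combined as in this small sketch; the word is only an example, and the POS pattern ":f:[si]" is one of those used in gce_suggestions.js:

// Sketch only: find phonetically similar words restricted by a POS pattern.
phonet.init(sJSONData);                                  // sJSONData: content of phonet_data.json
if (phonet.hasSimil("mère", ":f:[si]")) {                // example word
    let aSimil = phonet.selectSimil("mère", ":f:[si]");  // returns a Set
    console.log(Array.from(aSimil).join("|"));
}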

Modified gc_lang/fr/modules-js/textformatter.js from [7314bcd9a1] to [c6e77daf98].

(old version, lines 194-208)

                                    [/^(qu|lorsqu|puisqu|quoiqu|presqu|jusqu|aujourd|entr|quelqu|prud) /ig, "$1’"] ],
    "ma_1letter_lowercase":       [ [/[  ]([ldjnmtscç]) (?=[aàeéêiîoôuyhAÀEÉÊIÎOÔUYH])/g, "$1’"] ],
    "ma_1letter_uppercase":       [ [/[  ]([LDJNMTSCÇ]) (?=[aàeéêiîoôuyhAÀEÉÊIÎOÔUYH])/g, "$1’"],
                                    [/^([LDJNMTSCÇ]) (?=[aàeéêiîoôuyhAÀEÉÊIÎOÔUYH])/g, "$1’"] ]
};


const dDefaultOptions = new Map ([
    ["ts_units", true],
    ["start_of_paragraph", true],
    ["end_of_paragraph", true],
    ["between_words", true],
    ["before_punctuation", true],
    ["within_parenthesis", true],
    ["within_square_brackets", true],

(new version, lines 194-208)

                                    [/^(qu|lorsqu|puisqu|quoiqu|presqu|jusqu|aujourd|entr|quelqu|prud) /ig, "$1’"] ],
    "ma_1letter_lowercase":       [ [/[  ]([ldjnmtscç]) (?=[aàeéêiîoôuyhAÀEÉÊIÎOÔUYH])/g, "$1’"] ],
    "ma_1letter_uppercase":       [ [/[  ]([LDJNMTSCÇ]) (?=[aàeéêiîoôuyhAÀEÉÊIÎOÔUYH])/g, "$1’"],
                                    [/^([LDJNMTSCÇ]) (?=[aàeéêiîoôuyhAÀEÉÊIÎOÔUYH])/g, "$1’"] ]
};


const dTFDefaultOptions = new Map ([
    ["ts_units", true],
    ["start_of_paragraph", true],
    ["end_of_paragraph", true],
    ["between_words", true],
    ["before_punctuation", true],
    ["within_parenthesis", true],
    ["within_square_brackets", true],
(old version, lines 247-286)

    ["etc", true],
    ["missing_hyphens", true],
    ["ma_word", true],
    ["ma_1letter_lowercase", false],
    ["ma_1letter_uppercase", false]
]);

const dOptions = dDefaultOptions.gl_shallowCopy();


class TextFormatter {

    constructor () {
        this.sLang = "fr";
    };


    formatText (sText, dOpt=null) {
        if (dOpt !== null) {
            dOptions.gl_updateOnlyExistingKeys(dOpt);
        }
        for (let [sOptName, bVal] of dOptions) {
            if (bVal && oReplTable.has(sOptName)) {
                for (let [zRgx, sRep] of oReplTable[sOptName]) {
                    sText = sText.replace(zRgx, sRep);
                }
            }
        }
        return sText;
    };


    getDefaultOptions () {
        return dDefaultOptions;
    }
}


if (typeof(exports) !== 'undefined') {
    exports.TextFormatter = TextFormatter;
    exports.oReplTable = oReplTable;
}

(new version, lines 247-286)

    ["etc", true],
    ["missing_hyphens", true],
    ["ma_word", true],
    ["ma_1letter_lowercase", false],
    ["ma_1letter_uppercase", false]
]);

const dTFOptions = dTFDefaultOptions.gl_shallowCopy();


class TextFormatter {

    constructor () {
        this.sLang = "fr";

    }

    formatText (sText, dOpt=null) {
        if (dOpt !== null) {
            dTFOptions.gl_updateOnlyExistingKeys(dOpt);
        }
        for (let [sOptName, bVal] of dTFOptions) {
            if (bVal && oReplTable.has(sOptName)) {
                for (let [zRgx, sRep] of oReplTable[sOptName]) {
                    sText = sText.replace(zRgx, sRep);
                }
            }
        }
        return sText;

    }

    getDefaultOptions () {
        return dTFDefaultOptions;
    }
}


if (typeof(exports) !== 'undefined') {
    exports.TextFormatter = TextFormatter;
    exports.oReplTable = oReplTable;
}
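
Renaming dDefaultOptions/dOptions to dTFDefaultOptions/dTFOptions presumably avoids name collisions now that these files can be loaded as plain scripts sharing one scope (see the comments in gce_worker.js below). A minimal usage sketch, with an illustrative input string and option override:

// Sketch only: apply the formatter with one option switched off.
let oTF = new TextFormatter();
let sFixed = oTF.formatText("Il  est là ,  enfin ...", new Map([["ts_units", false]]));
console.log(sFixed);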

Modified gc_lang/fr/modules/gce_suggestions.py from [948c38b1f8] to [e839f7efe5].

(old version, lines 11-25)

    aSugg = set()
    for sStem in stem(sFlex):
        tTags = conj._getTags(sStem)
        if tTags:
            # we get the tense
            aTense = set()
            for sMorph in _dAnalyses.get(sFlex, []): # we don’t check if word exists in _dAnalyses, for it is assumed it has been done before
                for m in re.finditer(sStem+" .*?(:(?:Y|I[pqsf]|S[pq]|K|P))", sMorph):
                    # stem must be used in regex to prevent confusion between different verbs (e.g. sauras has 2 stems: savoir and saurer)
                    if m:
                        if m.group(1) == ":Y":
                            aTense.add(":Ip")
                            aTense.add(":Iq")
                            aTense.add(":Is")
                        elif m.group(1) == ":P":

(new version, lines 11-25)

    aSugg = set()
    for sStem in stem(sFlex):
        tTags = conj._getTags(sStem)
        if tTags:
            # we get the tense
            aTense = set()
            for sMorph in _dAnalyses.get(sFlex, []): # we don’t check if word exists in _dAnalyses, for it is assumed it has been done before
                for m in re.finditer(">"+sStem+" .*?(:(?:Y|I[pqsf]|S[pq]|K|P))", sMorph):
                    # stem must be used in regex to prevent confusion between different verbs (e.g. sauras has 2 stems: savoir and saurer)
                    if m:
                        if m.group(1) == ":Y":
                            aTense.add(":Ip")
                            aTense.add(":Iq")
                            aTense.add(":Is")
                        elif m.group(1) == ":P":

Modified gc_lang/fr/tb/content/overlay.js from [31d038a91d] to [a6d0f601cc].

(old version, lines 908-921)

        oTextFormatter.resetProgressBar();
    }
}, false);

window.addEventListener("load", function (xEvent) {
    oDictIgniter.init();
    oGrammarChecker.loadGC();

}, false);

window.addEventListener("compose-window-init", function (xEvent) {
    oGrammarChecker.loadUI();
    oGrammarChecker.closePanel();
    oGrammarChecker.clearPreview();
    oTextFormatter.init();

(new version, lines 908-922)

        oTextFormatter.resetProgressBar();
    }
}, false);

window.addEventListener("load", function (xEvent) {
    oDictIgniter.init();
    oGrammarChecker.loadGC();
    //oGrammarChecker.fullTests();
}, false);

window.addEventListener("compose-window-init", function (xEvent) {
    oGrammarChecker.loadUI();
    oGrammarChecker.closePanel();
    oGrammarChecker.clearPreview();
    oTextFormatter.init();

Added gc_lang/fr/webext/background.js version [03c66ef198].

(new file, lines 1-58)

// Background 

"use strict";

let xGCEWorker = new Worker("gce_worker.js");

xGCEWorker.onmessage = function (e) {
    switch (e.data[0]) {
        case "grammar_errors":
            console.log("GRAMMAR ERRORS");
            console.log(e.data[1].aGrammErr);
            break;
        case "spelling_and_grammar_errors":
            console.log("SPELLING AND GRAMMAR ERRORS");
            console.log(e.data[1].aSpellErr);
            console.log(e.data[1].aGrammErr);
            break;
        case "tests_results":
            console.log("TESTS RESULTS");
            console.log(e.data[1]);
            break;
        case "options":
            console.log("OPTIONS");
            console.log(e.data[1]);
            break;
        case "tokens":
            console.log("TOKENS");
            console.log(e.data[1]);
            break;
        case "error":
            console.log("ERROR");
            console.log(e.data[1]);
            break;
        default:
            console.log("Unknown command: " + e.data[0]);
    }
};

xGCEWorker.postMessage(["init", {sExtensionPath: browser.extension.getURL("."), sOptions: "", sContext: "Firefox"}]);

xGCEWorker.postMessage(["parse", {sText: "J’en aie mare...", sCountry: "FR", bDebug: false, bContext: false}]);

xGCEWorker.postMessage(["parseAndSpellcheck", {sText: "C’est terribles, ils va tout perdrre.", sCountry: "FR", bDebug: false, bContext: false}]);

xGCEWorker.postMessage(["getListOfTokens", {sText: "J’en ai assez de ces âneries ! Merci bien. Ça suffira."}]);

xGCEWorker.postMessage(["fullTests"]);


/*
    Messages from the extension (not the Worker)
*/
function handleMessage (oRequest, xSender, sendResponse) {
  console.log(`[background] received: ${oRequest.content}`);
  sendResponse({response: "response from background script"});
}

browser.runtime.onMessage.addListener(handleMessage);
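
handleMessage() answers messages coming from other parts of the extension through the standard runtime-messaging API. A minimal sketch of the sending side (for instance a content script or a panel script); only browser.runtime.sendMessage() is assumed, nothing project-specific:

// Sketch only: query the background script and log its reply.
browser.runtime.sendMessage({ content: "ping from a content script" })
    .then(oResponse => console.log(oResponse.response))
    .catch(e => console.error(e));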

Modified gc_lang/fr/webext/gce_worker.js from [8fda2777a3] to [2ab933f7e3].

(old version, lines 1-139)

/*
try {
    console.log("BEFORE");
    //var myhelpers = require('./grammalecte/helpers.js');

    require(['./grammalecte/helpers.js'], function (foo) {
        console.log("LOADING");
        echo("MODULE LOADED2");










    });


    console.log("AFTER");
}

catch (e) {
    console.log("\n" + e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message);






    console.error(e);
}*/

echo("VA TE FAIRE FOUTRE");











let gce = null; // module: grammar checker engine
let text = null;
let tkz = null; // module: tokenizer
let lxg = null; // module: lexicographer
let helpers = null;

let oTokenizer = null;
let oDict = null;

let oLxg = null;



function loadGrammarChecker (sGCOptions="", sContext="JavaScript") {
    if (gce === null) {
        try {

            gce = require("resource://grammalecte/fr/gc_engine.js");
            helpers = require("resource://grammalecte/helpers.js");
            text = require("resource://grammalecte/text.js");

            tkz = require("resource://grammalecte/tokenizer.js");

            lxg = require("resource://grammalecte/fr/lexicographe.js");




            oTokenizer = new tkz.Tokenizer("fr");
            helpers.setLogOutput(console.log);
            gce.load(sContext);
            oDict = gce.getDictionary();
            oLxg = new lxg.Lexicographe(oDict);
            if (sGCOptions !== "") {
                gce.setOptions(helpers.objectToMap(JSON.parse(sGCOptions)));
            }
            // we always retrieve options from the gce, for setOptions filters obsolete options
            return gce.getOptions()._toString();
        }
        catch (e) {
            console.log("# Error: " + e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message);

        }
    }
}

function parse (sText, sLang, bDebug, bContext) {
    let aGrammErr = gce.parse(sText, sLang, bDebug, bContext);
    return JSON.stringify(aGrammErr);
}

function parseAndSpellcheck (sText, sLang, bDebug, bContext) {
    let aGrammErr = gce.parse(sText, sLang, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sText, oDict);
    return JSON.stringify({ aGrammErr: aGrammErr, aSpellErr: aSpellErr });
}

function getOptions () {
    return gce.getOptions()._toString();
}

function getDefaultOptions () {
    return gce.getDefaultOptions()._toString();
}

function setOptions (sGCOptions) {
    gce.setOptions(helpers.objectToMap(JSON.parse(sGCOptions)));
    return gce.getOptions()._toString();
}

function setOption (sOptName, bValue) {
    gce.setOptions(new Map([ [sOptName, bValue] ]));
    return gce.getOptions()._toString();
}

function resetOptions () {
    gce.resetOptions();


    return gce.getOptions()._toString();








}


function fullTests (sGCOptions="") {
    if (!gce || !oDict) {
        return "# Error: grammar checker or dictionary not loaded."

    }
    let dMemoOptions = gce.getOptions();
    if (sGCOptions) {
        gce.setOptions(helpers.objectToMap(JSON.parse(sGCOptions)));
    }
    let tests = require("resource://grammalecte/tests.js");
    let oTest = new tests.TestGrammarChecking(gce);
    let sAllRes = "";
    for (let sRes of oTest.testParse()) {
        dump(sRes+"\n");
        sAllRes += sRes+"\n";

    }
    gce.setOptions(dMemoOptions);
    return sAllRes;

}


// Lexicographer

function getListOfElements (sText) {
    try {
        let aElem = [];
        let aRes = null;
        for (let oToken of oTokenizer.genTokens(sText)) {
            aRes = oLxg.getInfoForToken(oToken);
            if (aRes) {
                aElem.push(aRes);
            }
        }
        return JSON.stringify(aElem);
    }
    catch (e) {
        helpers.logerror(e);
    }
    return JSON.stringify([]);
}


function handleMessage (oRequest, xSender, sendResponse) {
  console.log(`[background] received: ${oRequest.content}`);
  sendResponse({response: "response from background script"});
}

browser.runtime.onMessage.addListener(handleMessage);

(new version, lines 1-213)

/*
    WORKER:
    https://developer.mozilla.org/en-US/docs/Web/API/Worker
    https://developer.mozilla.org/en-US/docs/Web/API/DedicatedWorkerGlobalScope


    JavaScript sucks.
    No module available in WebExtension at the moment! :(
    No require, no import/export.

    In Worker, we have importScripts() which imports everything in this scope.

    In order to use the same base of code with XUL-addon for Thunderbird and SDK-addon for Firefox,
    all modules have been “objectified”. And while they are still imported via “require”
    in the previous extensions, they are loaded as background scripts in WebExtension sharing
    the same memory space…

    When JavaScript become a modern language, “deobjectify” the modules…

    ATM, import/export are not available by default:
    — Chrome 60 – behind the Experimental Web Platform flag in chrome:flags.
    — Firefox 54 – behind the dom.moduleScripts.enabled setting in about:config.
    — Edge 15 – behind the Experimental JavaScript Features setting in about:flags.

    https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/import
    https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/export
*/

"use strict";


console.log("GC Engine Worker [start]");
//console.log(self);

importScripts("grammalecte/helpers.js");
importScripts("grammalecte/str_transform.js");
importScripts("grammalecte/ibdawg.js");
importScripts("grammalecte/text.js");
importScripts("grammalecte/tokenizer.js");
importScripts("grammalecte/fr/conj.js");
importScripts("grammalecte/fr/mfsp.js");
importScripts("grammalecte/fr/phonet.js");
importScripts("grammalecte/fr/cregex.js");
importScripts("grammalecte/fr/gc_options.js");
importScripts("grammalecte/fr/gc_rules.js");
importScripts("grammalecte/fr/gc_engine.js");
importScripts("grammalecte/fr/lexicographe.js");
importScripts("grammalecte/tests.js");
/*
    Warning.
    Initialization can’t be completed at startup of the worker,
    for we need the path of the extension to load data stored in JSON files.
    This path is retrieved in background.js and passed with the event “init”.
*/
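/*
    A minimal sketch of that background.js side (illustrative only, not the actual
    background.js of this extension; the worker file name and the option/context values
    are assumptions). browser.extension.getURL("") returns the base URL of the extension,
    which is what the worker needs to fetch the JSON data files:

        let xGCEWorker = new Worker("gce_worker.js");
        xGCEWorker.postMessage(["init", {
            sExtensionPath: browser.extension.getURL(""),
            sOptions: "",        // "" keeps the default grammar checking options
            sContext: "Firefox"
        }]);
*/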


/*
    Message Event Object
    https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent

*/
onmessage = function (e) {
    let oParam = e.data[1];
    switch (e.data[0]) {
        case "init":
            loadGrammarChecker(oParam.sExtensionPath, oParam.sOptions, oParam.sContext);
            break;
        case "parse":
            parse(oParam.sText, oParam.sCountry, oParam.bDebug, oParam.bContext);
            break;
        case "parseAndSpellcheck":
            parseAndSpellcheck(oParam.sText, oParam.sCountry, oParam.bDebug, oParam.bContext);
            break;
        case "getOptions":
            getOptions();
            break;
        case "getDefaultOptions":
            getDefaultOptions();

            break;
        case "setOptions":

            setOptions(oParam.sOptions);
            break;
        case "setOption":
            setOption(oParam.sOptName, oParam.bValue);
            break;
        case "resetOptions":
            resetOptions();

            break;

        case "fullTests":
            fullTests();
            break;
        case "getListOfTokens":
            getListOfTokens(oParam.sText);
            break;
        default:
            console.log("Unknown command: " + e.data[0]);
    }
}
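/*
    A minimal sketch of the calling side of this protocol (illustrative only; the worker
    variable and the sample text are assumptions). Every command above replies with
    postMessage([sLabel, data]), so the caller dispatches on the label:

        xGCEWorker.onmessage = function (e) {
            let [sLabel, oData] = e.data;
            switch (sLabel) {
                case "grammar_errors":
                    console.log(oData.aGrammErr);
                    break;
                case "options":
                    console.log(oData);     // a string, see gl_toString()
                    break;
                case "error":
                    console.error(oData);
            }
        };
        xGCEWorker.postMessage(["parse", {sText: "Je suit…", sCountry: "FR", bDebug: false, bContext: false}]);
*/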








let oDict = null;
let oTokenizer = null;
let oLxg = null;
let oTest = null;


function loadGrammarChecker (sExtensionPath, sGCOptions="", sContext="JavaScript") {

    try {
        console.log("Loading… Extension path: " + sExtensionPath);
        conj.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/conj_data.json"));
        phonet.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/phonet_data.json"));
        mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
        console.log("Modules have been initialized…");
        gc_engine.load(sContext, sExtensionPath+"/grammalecte/_dictionaries");
        oDict = gc_engine.getDictionary();
        oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
        oLxg = new Lexicographe(oDict);
        if (sGCOptions !== "") {
            gc_engine.setOptions(helpers.objectToMap(JSON.parse(sGCOptions)));
        }
        oTokenizer = new Tokenizer("fr");

        tests();

        // always retrieve the options from gc_engine, as setOptions() filters out obsolete options
        postMessage(["options", gc_engine.getOptions().gl_toString()]);
    }
    catch (e) {
        console.error(e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message);
        postMessage(["error", e.message]);
    }
}


function parse (sText, sCountry, bDebug, bContext) {
    let aGrammErr = gc_engine.parse(sText, sCountry, bDebug, bContext);
    postMessage(["grammar_errors", {aGrammErr: aGrammErr}]);
}

function parseAndSpellcheck (sText, sCountry, bDebug, bContext) {
    let aGrammErr = gc_engine.parse(sText, sCountry, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sText, oDict);
    postMessage(["spelling_and_grammar_errors", {aGrammErr: aGrammErr, aSpellErr: aSpellErr}]);
}

function getOptions () {
    postMessage(["options", gc_engine.getOptions().gl_toString()]);
}

function getDefaultOptions () {
    postMessage(["options", gc_engine.getDefaultOptions().gl_toString()]);
}

function setOptions (sGCOptions) {
    gc_engine.setOptions(helpers.objectToMap(JSON.parse(sGCOptions)));
    postMessage(["options", gc_engine.getOptions().gl_toString()]);
}

function setOption (sOptName, bValue) {
    gc_engine.setOptions(new Map([ [sOptName, bValue] ]));
    postMessage(["options", gc_engine.getOptions().gl_toString()]);
}

function resetOptions () {
    gc_engine.resetOptions();
    postMessage(["options", gc_engine.getOptions().gl_toString()]);
}

function tests () {
    console.log(conj.getConj("devenir", ":E", ":2s"));
    console.log(mfsp.getMasForm("emmerdeuse", true));
    console.log(mfsp.getMasForm("pointilleuse", false));
    console.log(phonet.getSimil("est"));
    let aRes = gc_engine.parse("Je suit...");
    for (let oErr of aRes) {
        console.log(text.getReadableError(oErr));
    }
}

function fullTests (sGCOptions='{"nbsp":true, "esp":true, "unit":true, "num":true}') {
    if (!gc_engine || !oDict) {
        postMessage(["error", "# Error: grammar checker or dictionary not loaded."]);
        return;
    }
    let dMemoOptions = gc_engine.getOptions();
    if (sGCOptions) {
        gc_engine.setOptions(helpers.objectToMap(JSON.parse(sGCOptions)));
    }
    let sMsg = "";
    for (let sRes of oTest.testParse()) {
        sMsg += sRes + "\n";
        console.log(sRes);
    }
    gc_engine.setOptions(dMemoOptions);
    postMessage(["tests_results", sMsg]);
}


// Lexicographer

function getListOfTokens (sText) {
    try {
        let aElem = [];
        let aRes = null;
        for (let oToken of oTokenizer.genTokens(sText)) {
            aRes = oLxg.getInfoForToken(oToken);
            if (aRes) {
                aElem.push(aRes);
            }
        }
        postMessage(["tokens", aElem]);
    }
    catch (e) {
        helpers.logerror(e);
        postMessage(["error", e.message]);
    }
}




Modified gc_lang/fr/webext/manifest.json from [53ddc564c6] to [49413af971].

      "id": "French-GC@grammalecte.net",
      "strict_min_version": "54.0"
    }
  },

  "author": "Olivier R.",
  "homepage_url": "https://grammalecte.net",
  "offline_enabled": true,

  "description": "Correcteur grammatical pour le français.",

  "icons": { "16": "img/logo-16.png",
             "32": "img/logo-32.png",
             "48": "img/logo-48.png",
             "64": "img/logo-64.png",
             "96": "img/logo-96.png" },

  "browser_action": {
    "default_icon": "img/logo-32.png",
    "default_popup": "panel/main.html",
    "default_title": "Grammalecte [fr]",
    "browser_style": false
  },
  "background": {
    "scripts": ["require.js", "grammalecte/helpers.js", "gce_worker.js"]


  },
  "web_accessible_resources": [

    "beasts/frog.jpg",
    "beasts/turtle.jpg",


    "beasts/snake.jpg"
  ],
  "permissions": [
    "activeTab"
  ]
}







      "id": "French-GC@grammalecte.net",
      "strict_min_version": "54.0"
    }
  },

  "author": "Olivier R.",
  "homepage_url": "https://grammalecte.net",


  "description": "Correcteur grammatical pour le français.",

  "icons": { "16": "img/logo-16.png",
             "32": "img/logo-32.png",
             "48": "img/logo-48.png",
             "64": "img/logo-64.png",
             "96": "img/logo-96.png" },

  "browser_action": {
    "default_icon": "img/logo-32.png",
    "default_popup": "panel/main.html",
    "default_title": "Grammalecte [fr]",
    "browser_style": false
  },
  "background": {
    "scripts": [
      "background.js"
    ]
  },
  "web_accessible_resources": [
    "grammalecte/_dictionaries/French.json",
    "grammalecte/fr/conj_data.json",

    "grammalecte/fr/mfsp_data.json",
    "grammalecte/fr/phonet_data.json",
    "grammalecte/fr/tests_data.json"
  ],
  "permissions": [
    "activeTab"
  ]
}

Modified gc_lang/fr/xpi/data/conj_panel.js from [0a29bb81bd] to [eb75f474de].

67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
                sVerb = sVerb.slice(2);
            }
            if (sVerb.endsWith("?")) {
                document.getElementById('oint').checked = true;
                sVerb = sVerb.slice(0,-1).trim();
            }

            if (!isVerb(sVerb)) {
                document.getElementById('verb').style = "color: #BB4411;";
            } else {
                self.port.emit("show");
                document.getElementById('verb_title').textContent = sVerb;
                document.getElementById('verb').style = "color: #999999;";
                document.getElementById('verb').value = "";
                oVerb = new Verb(sVerb);







                sVerb = sVerb.slice(2);
            }
            if (sVerb.endsWith("?")) {
                document.getElementById('oint').checked = true;
                sVerb = sVerb.slice(0,-1).trim();
            }

            if (!conj.isVerb(sVerb)) {
                document.getElementById('verb').style = "color: #BB4411;";
            } else {
                self.port.emit("show");
                document.getElementById('verb_title').textContent = sVerb;
                document.getElementById('verb').style = "color: #999999;";
                document.getElementById('verb').value = "";
                oVerb = new Verb(sVerb);

Modified gc_lang/fr/xpi/gce_worker.js from [9e0a605cc0] to [32ac3727dc].

        }
        catch (e) {
            worker.log("# Error: " + e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message);
        }
    }
}

function parse (sText, sLang, bDebug, bContext) {
    let aGrammErr = gce.parse(sText, sLang, bDebug, bContext);
    return JSON.stringify(aGrammErr);
}

function parseAndSpellcheck (sText, sLang, bDebug, bContext) {
    let aGrammErr = gce.parse(sText, sLang, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sText, oDict);
    return JSON.stringify({ aGrammErr: aGrammErr, aSpellErr: aSpellErr });
}

function getOptions () {
    return gce.getOptions().gl_toString();
}







        }
        catch (e) {
            worker.log("# Error: " + e.fileName + "\n" + e.name + "\nline: " + e.lineNumber + "\n" + e.message);
        }
    }
}

function parse (sText, sCountry, bDebug, bContext) {
    let aGrammErr = gce.parse(sText, sCountry, bDebug, bContext);
    return JSON.stringify(aGrammErr);
}

function parseAndSpellcheck (sText, sCountry, bDebug, bContext) {
    let aGrammErr = gce.parse(sText, sCountry, bDebug, bContext);
    let aSpellErr = oTokenizer.getSpellingErrors(sText, oDict);
    return JSON.stringify({ aGrammErr: aGrammErr, aSpellErr: aSpellErr });
}

function getOptions () {
    return gce.getOptions().gl_toString();
}

Modified make.py from [18d1b24a9f] to [e6482fde8c].

            # Firefox
            if xArgs.firefox:
                with helpers.cd("_build/xpi/"+sLang):
                    os.system("jpm run -b nightly")

            if xArgs.web_ext:
                with helpers.cd("_build/webext/"+sLang):
                    os.system(r'web-ext run --firefox="' + dVars['fx_beta_path'] + '" --browser-console')            

            # Thunderbird
            if xArgs.thunderbird:
                os.system("thunderbird -jsconsole -P debug")
        else:
            print("Folder not found: gc_lang/"+sLang)








            # Firefox
            if xArgs.firefox:
                with helpers.cd("_build/xpi/"+sLang):
                    os.system("jpm run -b nightly")

            if xArgs.web_ext:
                with helpers.cd("_build/webext/"+sLang):
                    os.system(r'web-ext run --firefox="' + dVars['fx_nightly_path'] + '" --browser-console')            

            # Thunderbird
            if xArgs.thunderbird:
                os.system("thunderbird -jsconsole -P debug")
        else:
            print("Folder not found: gc_lang/"+sLang)