Grammalecte: Check-in [7ab24796b6]

Overview
Comment: merge trunk
Downloads: Tarball | ZIP archive | SQL archive
Timelines: family | ancestors | descendants | both | tbnext
Files: files | file ages | folders
SHA3-256: 7ab24796b6c817ec9bf0566609dfa3691d42884f9495d4ec514ebfc4bd6cd04d
User & Date: olr on 2018-05-09 16:01:18
Other Links: branch diff | manifest | tags
Context
2018-05-09
16:06  [fr][tb] update install version number check-in: b2b8872097 user: olr tags: fr, tb, tbnext
16:01  merge trunk check-in: 7ab24796b6 user: olr tags: tbnext
15:58  [build] useless import check-in: 4bbd67edd8 user: olr tags: trunk, build
2018-04-26
17:14  [tb] remove Hunspell dictionaries selection (obsolete with TB 60+) check-in: fa07dc2d0e user: olr tags: tb, tbnext
Changes

Modified compile_rules.py from [b3cfeb04f1] to [b4b673c194].

Before (lines 1-13):
import re
import sys
import traceback
import json
from distutils import file_util

import compile_rules_js_convert as jsconv


dDEF = {}
lFUNCTIONS = []



After (lines 1-11):
import re

import traceback
import json


import compile_rules_js_convert as jsconv


dDEF = {}
lFUNCTIONS = []

Modified compile_rules_js_convert.py from [da0ad4e711] to [5ad87f3f46].

Before (lines 30-43):
    sCode = sCode.replace(".startswith", ".startsWith")
    sCode = sCode.replace(".lower", ".toLowerCase")
    sCode = sCode.replace(".upper", ".toUpperCase")
    sCode = sCode.replace(".isdigit", ".gl_isDigit")
    sCode = sCode.replace(".isupper", ".gl_isUpperCase")
    sCode = sCode.replace(".islower", ".gl_isLowerCase")
    sCode = sCode.replace(".istitle", ".gl_isTitle")

    sCode = sCode.replace(".capitalize", ".gl_toCapitalize")
    sCode = sCode.replace(".strip", ".gl_trim")
    sCode = sCode.replace(".lstrip", ".gl_trimLeft")
    sCode = sCode.replace(".rstrip", ".gl_trimRight")
    sCode = sCode.replace('.replace("."', r".replace(/\./g")
    sCode = sCode.replace('.replace("..."', r".replace(/\.\.\./g")
    sCode = re.sub(r'.replace\("([^"]+)" ?,', ".replace(/\\1/g,", sCode)







After (lines 30-44):
    sCode = sCode.replace(".startswith", ".startsWith")
    sCode = sCode.replace(".lower", ".toLowerCase")
    sCode = sCode.replace(".upper", ".toUpperCase")
    sCode = sCode.replace(".isdigit", ".gl_isDigit")
    sCode = sCode.replace(".isupper", ".gl_isUpperCase")
    sCode = sCode.replace(".islower", ".gl_isLowerCase")
    sCode = sCode.replace(".istitle", ".gl_isTitle")
    sCode = sCode.replace(".isalpha", ".gl_isAlpha")
    sCode = sCode.replace(".capitalize", ".gl_toCapitalize")
    sCode = sCode.replace(".strip", ".gl_trim")
    sCode = sCode.replace(".lstrip", ".gl_trimLeft")
    sCode = sCode.replace(".rstrip", ".gl_trimRight")
    sCode = sCode.replace('.replace("."', r".replace(/\./g")
    sCode = sCode.replace('.replace("..."', r".replace(/\.\.\./g")
    sCode = re.sub(r'.replace\("([^"]+)" ?,', ".replace(/\\1/g,", sCode)
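
For reference, a minimal sketch (not part of this check-in) of what this chain of literal replacements does to a line of rule code. The helper name pyToJs and the sample input are invented for illustration; the real conversion function in compile_rules_js_convert.py has its own name and handles many more cases:

import re

def pyToJs (sCode):
    # condensed illustration of the substitutions above, applied in the same order
    sCode = sCode.replace(".lower", ".toLowerCase")
    sCode = sCode.replace(".isalpha", ".gl_isAlpha")   # the method newly handled by this check-in
    sCode = re.sub(r'.replace\("([^"]+)" ?,', ".replace(/\\1/g,", sCode)
    return sCode

print(pyToJs('s.lower().replace("œ", "oe")'))
# -> s.toLowerCase().replace(/œ/g, "oe")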

Modified gc_core/py/oxt/OptionsDialog.xcs from [6ddcee836f] to [7f3cb8622b].

Before (lines 22-35):
                <desc>The data for one leaf.</desc>
            </info>
            <prop oor:name="use_graphspell" oor:type="xs:int"><value>1</value></prop>
            <prop oor:name="use_graphspell_sugg" oor:type="xs:int"><value>1</value></prop>
            <prop oor:name="use_extended_dic" oor:type="xs:int"><value>0</value></prop>
            <prop oor:name="use_community_dic" oor:type="xs:int"><value>0</value></prop>
            <prop oor:name="use_personal_dic" oor:type="xs:int"><value>1</value></prop>

            <prop oor:name="extended_dic" oor:type="xs:string"><value></value></prop>
            <prop oor:name="community_dic" oor:type="xs:string"><value></value></prop>
            <prop oor:name="personal_dic" oor:type="xs:string"><value></value></prop>
        </group>
    </templates>

    <component>







After (lines 22-36):
                <desc>The data for one leaf.</desc>
            </info>
            <prop oor:name="use_graphspell" oor:type="xs:int"><value>1</value></prop>
            <prop oor:name="use_graphspell_sugg" oor:type="xs:int"><value>1</value></prop>
            <prop oor:name="use_extended_dic" oor:type="xs:int"><value>0</value></prop>
            <prop oor:name="use_community_dic" oor:type="xs:int"><value>0</value></prop>
            <prop oor:name="use_personal_dic" oor:type="xs:int"><value>1</value></prop>
            <prop oor:name="main_dic_name" oor:type="xs:string"><value>classic</value></prop>
            <prop oor:name="extended_dic" oor:type="xs:string"><value></value></prop>
            <prop oor:name="community_dic" oor:type="xs:string"><value></value></prop>
            <prop oor:name="personal_dic" oor:type="xs:string"><value></value></prop>
        </group>
    </templates>

    <component>

Modified gc_lang/fr/build_data.py from [1f69de4a2f] to [8989f0e8f3].

Before (lines 1-17):
#!python3

# FRENCH DATA BUILDER
#
# by Olivier R.
# License: MPL 2

import json
import os
import itertools


import graphspell.ibdawg as ibdawg
from graphspell.echo import echo
from graphspell.str_transform import defineSuffixCode
import graphspell.tokenizer as tkz












After (lines 1-18):
#!python3

# FRENCH DATA BUILDER
#
# by Olivier R.
# License: MPL 2

import json
import os
import itertools
import traceback

import graphspell.ibdawg as ibdawg
from graphspell.echo import echo
from graphspell.str_transform import defineSuffixCode
import graphspell.tokenizer as tkz


Before (lines 267-281):
def makePhonetTable (sp, bJS=False):
    print("> Correspondances phonétiques ", end="")
    print("(Python et JavaScript)"  if bJS  else "(Python seulement)")
    
    import gc_lang.fr.modules.conj as conj

    try:
        oDict = ibdawg.IBDAWG("fr.bdic")
    except:
        traceback.print_exc()
        return

    # set of homophonic words
    lSet = []
    for sLine in readFile(sp+"/data/phonet_simil.txt"):







After (lines 268-282):
def makePhonetTable (sp, bJS=False):
    print("> Correspondances phonétiques ", end="")
    print("(Python et JavaScript)"  if bJS  else "(Python seulement)")
    
    import gc_lang.fr.modules.conj as conj

    try:
        oDict = ibdawg.IBDAWG("fr-allvars.bdic")
    except:
        traceback.print_exc()
        return

    # set of homophonic words
    lSet = []
    for sLine in readFile(sp+"/data/phonet_simil.txt"):

Modified gc_lang/fr/config.ini from [6a9a7ae65b] to [fbfd84b975].

Before (lines 1-27):
[args]
lang = fr
lang_name = French
locales = fr_FR fr_BE fr_CA fr_CH fr_LU fr_BF fr_BJ fr_CD fr_CI fr_CM fr_MA fr_ML fr_MU fr_NE fr_RE fr_SN fr_TG 
country_default = FR
name = Grammalecte
implname = grammalecte
# always use 3 numbers for version: x.y.z
version = 0.6.3.2
author = Olivier R.
provider = Dicollecte
link = http://grammalecte.net
description = Correcteur grammatical pour le français.
extras = README_fr.txt
logo = logo.png

# main dictionary
lexicon_src = lexicons/French.lex
dic_filename = fr
dic_name = French



# extended dictionary
lexicon_extended_src = lexicons/French.extended.lex
dic_extended_filename = fr.extended
dic_extended_name = Français - dictionnaire étendu
# community dictionary
lexicon_community_src = lexicons/French.community.lex
dic_community_filename = fr.community








After (lines 1-30):
[args]
lang = fr
lang_name = French
locales = fr_FR fr_BE fr_CA fr_CH fr_LU fr_BF fr_BJ fr_CD fr_CI fr_CM fr_MA fr_ML fr_MU fr_NE fr_RE fr_SN fr_TG 
country_default = FR
name = Grammalecte
implname = grammalecte
# always use 3 numbers for version: x.y.z
version = 0.6.4.2
author = Olivier R.
provider = Dicollecte
link = http://grammalecte.net
description = Correcteur grammatical pour le français.
extras = README_fr.txt
logo = logo.png

# main dictionary
lexicon_src = lexicons/French.lex
dic_filenames = fr-allvars,fr-classic,fr-reform
dic_name = Français,Français (Classique/Moderne),Français (Réforme 1990)
dic_filter = ,[*CMPX]$,[*RPX]$
dic_default_filename_py = fr-allvars
dic_default_filename_js = fr-allvars
# extended dictionary
lexicon_extended_src = lexicons/French.extended.lex
dic_extended_filename = fr.extended
dic_extended_name = Français - dictionnaire étendu
# community dictionary
lexicon_community_src = lexicons/French.community.lex
dic_community_filename = fr.community
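
The three new keys are parallel comma-separated lists: the n-th entry of dic_filenames goes with the n-th entry of dic_name and of dic_filter (note the leading comma in dic_filter, i.e. an empty filter for fr-allvars). A minimal sketch of how such entries pair up, assuming a build step simply splits and zips the lists; this is illustrative, not the project's actual build code:

import configparser

sSample = """
[args]
dic_filenames = fr-allvars,fr-classic,fr-reform
dic_name = Français,Français (Classique/Moderne),Français (Réforme 1990)
dic_filter = ,[*CMPX]$,[*RPX]$
"""

oCfg = configparser.ConfigParser()
oCfg.read_string(sSample)
lFiles = oCfg["args"]["dic_filenames"].split(",")
lNames = oCfg["args"]["dic_name"].split(",")
lFilters = oCfg["args"]["dic_filter"].split(",")
for sFile, sName, sFilter in zip(lFiles, lNames, lFilters):
    print(sFile, "->", sName, "| filter:", sFilter or "(none)")
# fr-allvars -> Français | filter: (none)
# fr-classic -> Français (Classique/Moderne) | filter: [*CMPX]$
# fr-reform -> Français (Réforme 1990) | filter: [*RPX]$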

Modified gc_lang/fr/dictionnaire/genfrdic.py from [5f240a0703] to [21ee33ebdc].

Before (lines 59-80):
               'shortname': '“Classique”',
               'asciiName': 'fr-classique',
               'mozAsciiName': 'fr-FR-classic',
               'subDicts': '*MCX',
               'mozId': 'fr-dicollecte-classique',
               'description': "Dictionnaire français “Classique”" }

dCLASSIQUEX = { 'name': 'DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “CLASSIQUE ÉTENDU”',
                'shortname': '“Classique étendu”',
                'asciiName': 'fr-classique-ext',
                'mozAsciiName': 'fr-FR-classic-ext',
                'subDicts': '*MCX',
                'mozId': 'fr-dicollecte-classique-ext',
                'description': "Dictionnaire français “Classique étendu”" }

dREFORME1990 = { 'name': 'DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “RÉFORME 1990”',
                 'shortname': '“Réforme 1990”',
                 'asciiName': 'fr-reforme1990',
                 'mozAsciiName': 'fr-FR-reform',
                 'subDicts': '*RX',
                 'mozId': 'fr-dicollecte-reforme1990',
                 'description': "Dictionnaire français “Réforme 1990”" }







After (lines 59-72):
               'shortname': '“Classique”',
               'asciiName': 'fr-classique',
               'mozAsciiName': 'fr-FR-classic',
               'subDicts': '*MCX',
               'mozId': 'fr-dicollecte-classique',
               'description': "Dictionnaire français “Classique”" }









dREFORME1990 = { 'name': 'DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “RÉFORME 1990”',
                 'shortname': '“Réforme 1990”',
                 'asciiName': 'fr-reforme1990',
                 'mozAsciiName': 'fr-FR-reform',
                 'subDicts': '*RX',
                 'mozId': 'fr-dicollecte-reforme1990',
                 'description': "Dictionnaire français “Réforme 1990”" }
Before (lines 1252-1266):
        # SEM
        #s += "~" + self.oEntry.se  if self.oEntry.se and self.oEntry.se != "@"  else ""
        # ETY
        #s += "<" + self.oEntry.et  if self.oEntry.et and self.oEntry.et != "@"  else ""
        # IFQ
        #s += "=" + self.cFq
        # DIC

        s += "/" + self.cDic


        return s

    def keyTriNat (self):
        return (self.sFlexion.translate(CHARMAP), self.sMorph)

    def keyFreq (self):
        return (100-self.fFreq, self.oEntry.sRadical, self.sFlexion)







After (lines 1244-1261):
        # SEM
        #s += "~" + self.oEntry.se  if self.oEntry.se and self.oEntry.se != "@"  else ""
        # ETY
        #s += "<" + self.oEntry.et  if self.oEntry.et and self.oEntry.et != "@"  else ""
        # IFQ
        #s += "=" + self.cFq
        # DIC
        if self.oEntry.di == "*" and self.cDic != "*":
            s += "/" + self.cDic
        else:
            s += "/" + self.oEntry.di
        return s

    def keyTriNat (self):
        return (self.sFlexion.translate(CHARMAP), self.sMorph)

    def keyFreq (self):
        return (100-self.fFreq, self.oEntry.sRadical, self.sFlexion)

Modified gc_lang/fr/modules-js/phonet_data.json from [1e891509d7] to [a832b53c50].

cannot compute difference between binary files

Modified gc_lang/fr/modules-js/textformatter.js from [287866f4c9] to [a9dd9e148e].

Before (lines 53-66):
                                    [/[  ]+:/g, " :"] ],
    "nnbsp_within_quotation_marks":[[/«([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ])/g, "« $1"],
                                    [/«[  ]+/g, "« "],
                                    [/([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ.!?])»/g, "$1 »"],
                                    [/[  ]+»/g, " »"] ],
    "nnbsp_within_numbers":       [ [/(\d)[  ](\d)/g, "$1 $2"] ],
    // common




    "nbsp_before_symbol":         [ [/(\d) ?([%‰€$£¥˚Ω℃])/g, "$1 $2"] ],
    "nbsp_before_units":          [ [/([0-9⁰¹²³⁴⁵⁶⁷⁸⁹]) ?([kcmµn]?(?:[slgJKΩ]|m[²³]?|Wh?|Hz|dB)|[%‰]|°C)\b/g, "$1 $2"] ],
    "nbsp_repair":                [ [/([\[(])[   ]([!?:;])/g, "$1$2"],
                                    [/(https?|ftp)[   ]:\/\//g, "$1://"],
                                    [/&([a-z]+)[   ];/g, "&$1;"],
                                    [/&#([0-9]+|x[0-9a-fA-F]+)[   ];/g, "&#$1;"] ],
    //// missing spaces







After (lines 53-70):
                                    [/[  ]+:/g, " :"] ],
    "nnbsp_within_quotation_marks":[[/«([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ])/g, "« $1"],
                                    [/«[  ]+/g, "« "],
                                    [/([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ.!?])»/g, "$1 »"],
                                    [/[  ]+»/g, " »"] ],
    "nnbsp_within_numbers":       [ [/(\d)[  ](\d)/g, "$1 $2"] ],
    // common
    "nbsp_titles":                [ [/\bM(mes?|ᵐᵉˢ?|grs?|ᵍʳˢ?|lles?|ˡˡᵉˢ?|rs?|ʳˢ?|M[.]) /g, "M$1 "],
                                    [/\bP(re?s?|ʳᵉ?ˢ?) /g, "P$1 "],
                                    [/\bD(re?s?|ʳᵉ?ˢ?) /g, "D$1 "],
                                    [/\bV(ves?|ᵛᵉˢ?) /g, "V$1 "] ],
    "nbsp_before_symbol":         [ [/(\d) ?([%‰€$£¥˚Ω℃])/g, "$1 $2"] ],
    "nbsp_before_units":          [ [/([0-9⁰¹²³⁴⁵⁶⁷⁸⁹]) ?([kcmµn]?(?:[slgJKΩ]|m[²³]?|Wh?|Hz|dB)|[%‰]|°C)\b/g, "$1 $2"] ],
    "nbsp_repair":                [ [/([\[(])[   ]([!?:;])/g, "$1$2"],
                                    [/(https?|ftp)[   ]:\/\//g, "$1://"],
                                    [/&([a-z]+)[   ];/g, "&$1;"],
                                    [/&#([0-9]+|x[0-9a-fA-F]+)[   ];/g, "&#$1;"] ],
    //// missing spaces
Before (lines 211-224):
    ["within_quotation_marks", true],
    ["nbsp_before_punctuation", true],
    ["nbsp_within_quotation_marks", true],
    ["nbsp_within_numbers", true],
    ["nnbsp_before_punctuation", false],
    ["nnbsp_within_quotation_marks", false],
    ["nnbsp_within_numbers", false],

    ["nbsp_before_symbol", true],
    ["nbsp_before_units", true],
    ["nbsp_repair", true],
    ["add_space_after_punctuation", true],
    ["add_space_around_hyphens", true],
    ["add_space_repair", true],
    ["erase_non_breaking_hyphens", false],







After (lines 215-229):
    ["within_quotation_marks", true],
    ["nbsp_before_punctuation", true],
    ["nbsp_within_quotation_marks", true],
    ["nbsp_within_numbers", true],
    ["nnbsp_before_punctuation", false],
    ["nnbsp_within_quotation_marks", false],
    ["nnbsp_within_numbers", false],
    ["nbsp_titles", false],
    ["nbsp_before_symbol", true],
    ["nbsp_before_units", true],
    ["nbsp_repair", true],
    ["add_space_after_punctuation", true],
    ["add_space_around_hyphens", true],
    ["add_space_repair", true],
    ["erase_non_breaking_hyphens", false],

Modified gc_lang/fr/modules/phonet_data.py from [e1577c5f69] to [9980015f68].

cannot compute difference between binary files

Modified gc_lang/fr/modules/tests.py from [43d45242b9] to [2e6f413e05].

Before (lines 20-34):
    return s.replace("\u2019", "'").replace("\u2013", "–").replace("\u2014", "—")


class TestDictionary (unittest.TestCase):

    @classmethod
    def setUpClass (cls):
        cls.oDic = IBDAWG("${dic_filename}.bdic")

    def test_lookup (self):
        for sWord in ["branche", "Émilie"]:
            self.assertTrue(self.oDic.lookup(sWord), sWord)

    def test_lookup_failed (self):
        for sWord in ["Branche", "BRANCHE", "BranchE", "BRanche", "BRAnCHE", "émilie"]:







After (lines 20-34):
    return s.replace("\u2019", "'").replace("\u2013", "–").replace("\u2014", "—")


class TestDictionary (unittest.TestCase):

    @classmethod
    def setUpClass (cls):
        cls.oDic = IBDAWG("${dic_main_filename_py}")

    def test_lookup (self):
        for sWord in ["branche", "Émilie"]:
            self.assertTrue(self.oDic.lookup(sWord), sWord)

    def test_lookup_failed (self):
        for sWord in ["Branche", "BRANCHE", "BranchE", "BRanche", "BRAnCHE", "émilie"]:

Modified gc_lang/fr/modules/textformatter.py from [f190943db7] to [8fb9ec33bf].

Before (lines 35-48):
                                    ("[  ]+:", " :")],
    "nnbsp_within_quotation_marks":[("«(?=\\w)", "« "),
                                    ("«[  ]+", "« "),
                                    ("(?<=[\\w.!?])»", " »"),
                                    ("[  ]+»", " »")],
    "nnbsp_within_numbers":        [("(\\d)[  ](\\d)", "\\1 \\2")],
    # common




    "nbsp_before_symbol":          [("(\\d) ?([%‰€$£¥˚Ω℃])", "\\1 \\2")],
    "nbsp_before_units":           [("(?<=[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]) ?([kcmµn]?(?:[slgJKΩ]|m[²³]?|Wh?|Hz|dB)|[%‰]|°C)\\b", " \\1")],
    "nbsp_repair":                 [("(?<=[[(])[   ]([!?:;])", "\\1"),
                                    ("(https?|ftp)[   ]:(?=//)", "\\1:"),
                                    ("&([a-z]+)[   ];", "&\\1;"),
                                    ("&#([0-9]+|x[0-9a-fA-F]+)[   ];", "&#\\1;")],
    ## missing spaces







After (lines 35-52):
                                    ("[  ]+:", " :")],
    "nnbsp_within_quotation_marks":[("«(?=\\w)", "« "),
                                    ("«[  ]+", "« "),
                                    ("(?<=[\\w.!?])»", " »"),
                                    ("[  ]+»", " »")],
    "nnbsp_within_numbers":        [("(\\d)[  ](\\d)", "\\1 \\2")],
    # common
    "nbsp_titles":                 [("\\bM(mes?|ᵐᵉˢ?|grs?|ᵍʳˢ?|lles?|ˡˡᵉˢ?|rs?|ʳˢ?|M\\.) ", "M\\1 "),
                                    ("\\bP(re?s?|ʳᵉ?ˢ?) ", "P\\1 "),
                                    ("\\bD(re?s?|ʳᵉ?ˢ?) ", "D\\1 "),
                                    ("\\bV(ves?|ᵛᵉˢ?) ", "V\\1 ")],
    "nbsp_before_symbol":          [("(\\d) ?([%‰€$£¥˚Ω℃])", "\\1 \\2")],
    "nbsp_before_units":           [("(?<=[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]) ?([kcmµn]?(?:[slgJKΩ]|m[²³]?|Wh?|Hz|dB)|[%‰]|°C)\\b", " \\1")],
    "nbsp_repair":                 [("(?<=[[(])[   ]([!?:;])", "\\1"),
                                    ("(https?|ftp)[   ]:(?=//)", "\\1:"),
                                    ("&([a-z]+)[   ];", "&\\1;"),
                                    ("&#([0-9]+|x[0-9a-fA-F]+)[   ];", "&#\\1;")],
    ## missing spaces
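
A quick illustration (not from this check-in) of the new nbsp_titles rule above: the ordinary space after an abbreviated title is replaced by a non-breaking one. The explicit \u00a0 below is an assumption about the exact space character used, which is not distinguishable in this extract:

import re

sPattern = r"\bM(mes?|ᵐᵉˢ?|grs?|ᵍʳˢ?|lles?|ˡˡᵉˢ?|rs?|ʳˢ?|M\.) "
sText = "Mme Durand et Mgr Martin sont arrivés."
# the spaces after "Mme" and "Mgr" become U+00A0 (no-break space)
print(re.sub(sPattern, "M\\1\u00a0", sText))
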
Before (lines 192-205):
    ("within_quotation_marks", True),
    ("nbsp_before_punctuation", True),
    ("nbsp_within_quotation_marks", True),
    ("nbsp_within_numbers", True),
    ("nnbsp_before_punctuation", False),
    ("nnbsp_within_quotation_marks", False),
    ("nnbsp_within_numbers", False),

    ("nbsp_before_symbol", True),
    ("nbsp_before_units", True),
    ("nbsp_repair", True),
    ("add_space_after_punctuation", True),
    ("add_space_around_hyphens", True),
    ("add_space_repair", True),
    ("erase_non_breaking_hyphens", False),







After (lines 196-210):
    ("within_quotation_marks", True),
    ("nbsp_before_punctuation", True),
    ("nbsp_within_quotation_marks", True),
    ("nbsp_within_numbers", True),
    ("nnbsp_before_punctuation", False),
    ("nnbsp_within_quotation_marks", False),
    ("nnbsp_within_numbers", False),
    ("nbsp_titles", False),
    ("nbsp_before_symbol", True),
    ("nbsp_before_units", True),
    ("nbsp_repair", True),
    ("add_space_after_punctuation", True),
    ("add_space_around_hyphens", True),
    ("add_space_repair", True),
    ("erase_non_breaking_hyphens", False),

Modified gc_lang/fr/oxt/ContextMenu/ContextMenu.py from [512c45de75] to [03a78a32c7].

Before (lines 127-141):
            if not oSpellChecker:
                xCurCtx = uno.getComponentContext()
                oGC = self.ctx.ServiceManager.createInstanceWithContext("org.openoffice.comp.pyuno.Lightproof.grammalecte", self.ctx)
                if hasattr(oGC, "getSpellChecker"):
                    # https://bugs.documentfoundation.org/show_bug.cgi?id=97790
                    oSpellChecker = oGC.getSpellChecker()
                else:
                    oSpellChecker = SpellChecker("${lang}", "${dic_filename}.bdic")
            if not oLexicographe:
                oLexicographe = lxg.Lexicographe(oSpellChecker)
        except:
            traceback.print_exc()
        
    def execute (self, args):
        if not args:







After (lines 127-141):
            if not oSpellChecker:
                xCurCtx = uno.getComponentContext()
                oGC = self.ctx.ServiceManager.createInstanceWithContext("org.openoffice.comp.pyuno.Lightproof.grammalecte", self.ctx)
                if hasattr(oGC, "getSpellChecker"):
                    # https://bugs.documentfoundation.org/show_bug.cgi?id=97790
                    oSpellChecker = oGC.getSpellChecker()
                else:
                    oSpellChecker = SpellChecker("${lang}", "fr-allvars.bdic")
            if not oLexicographe:
                oLexicographe = lxg.Lexicographe(oSpellChecker)
        except:
            traceback.print_exc()
        
    def execute (self, args):
        if not args:

Modified gc_lang/fr/oxt/DictOptions/DictOptions.py from [3eb1b2d60d] to [f2057c6bbe].

Before (lines 10-23):
import helpers
import do_strings

from com.sun.star.task import XJobExecutor
from com.sun.star.awt import XActionListener
from com.sun.star.beans import PropertyValue














class DictOptions (unohelper.Base, XActionListener, XJobExecutor):

    def __init__ (self, ctx):
        self.ctx = ctx
        self.xSvMgr = self.ctx.ServiceManager
        self.xContainer = None







After (lines 10-35):
import helpers
import do_strings

from com.sun.star.task import XJobExecutor
from com.sun.star.awt import XActionListener
from com.sun.star.beans import PropertyValue

from com.sun.star.awt.MessageBoxButtons import BUTTONS_OK
# BUTTONS_OK, BUTTONS_OK_CANCEL, BUTTONS_YES_NO, BUTTONS_YES_NO_CANCEL, BUTTONS_RETRY_CANCEL, BUTTONS_ABORT_IGNORE_RETRY
# DEFAULT_BUTTON_OK, DEFAULT_BUTTON_CANCEL, DEFAULT_BUTTON_RETRY, DEFAULT_BUTTON_YES, DEFAULT_BUTTON_NO, DEFAULT_BUTTON_IGNORE
from com.sun.star.awt.MessageBoxType import INFOBOX, ERRORBOX # MESSAGEBOX, INFOBOX, WARNINGBOX, ERRORBOX, QUERYBOX

def MessageBox (xDocument, sMsg, sTitle, nBoxType=INFOBOX, nBoxButtons=BUTTONS_OK):
    xParentWin = xDocument.CurrentController.Frame.ContainerWindow
    ctx = uno.getComponentContext()
    xToolkit = ctx.ServiceManager.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx) 
    xMsgBox = xToolkit.createMessageBox(xParentWin, nBoxType, nBoxButtons, sTitle, sMsg)
    return xMsgBox.execute()


class DictOptions (unohelper.Base, XActionListener, XJobExecutor):

    def __init__ (self, ctx):
        self.ctx = ctx
        self.xSvMgr = self.ctx.ServiceManager
        self.xContainer = None
Before (lines 32-155):
        xWidget.Height = h
        for k, w in kwargs.items():
            setattr(xWidget, k, w)
        self.xDialog.insertByName(name, xWidget)
        return xWidget

    def run (self, sLang):
        dUI = do_strings.getUI(sLang)



        self.xSettingNode = helpers.getConfigSetting("/org.openoffice.Lightproof_grammalecte/Other/", True)

        # dialog
        self.xDialog = self.xSvMgr.createInstanceWithContext('com.sun.star.awt.UnoControlDialogModel', self.ctx)
        self.xDialog.Width = 200
        self.xDialog.Height = 280
        self.xDialog.Title = dUI.get('title', "#title#")
        xWindowSize = helpers.getWindowSize()
        self.xDialog.PositionX = int((xWindowSize.Width / 2) - (self.xDialog.Width / 2))
        self.xDialog.PositionY = int((xWindowSize.Height / 2) - (self.xDialog.Height / 2))

        # fonts
        xFDTitle = uno.createUnoStruct("com.sun.star.awt.FontDescriptor")
        xFDTitle.Height = 9
        xFDTitle.Weight = uno.getConstantByName("com.sun.star.awt.FontWeight.BOLD")
        xFDTitle.Name = "Verdana"
        
        xFDSubTitle = uno.createUnoStruct("com.sun.star.awt.FontDescriptor")
        xFDSubTitle.Height = 10
        xFDSubTitle.Weight = uno.getConstantByName("com.sun.star.awt.FontWeight.BOLD")
        xFDSubTitle.Name = "Verdana"

        # widget
        nX = 10
        nY1 = 10
        nY2 = nY1 + 35
        nY3 = nY2 + 35
        nY4 = nY3 + 35
        nY5 = nY4 + 45
        nY6 = nY5 + 70

        nWidth = self.xDialog.Width - 20
        nHeight = 10

        # Spell checker section
        #self._addWidget("spelling_section", 'FixedLine', nX, nY1, nWidth, nHeight, Label = dUI.get("spelling_section", "#err"), FontDescriptor = xFDTitle)
        #self.xGraphspell = self._addWidget('activate_main', 'CheckBox', nX, nY1+15, nWidth, nHeight, Label = dUI.get('activate_main', "#err"))
        #self._addWidget('activate_main_descr', 'FixedText', nX, nY1+25, nWidth, nHeight*2, Label = dUI.get('activate_main_descr', "#err"), MultiLine = True)

        # Graphspell dictionary section
        self._addWidget("graphspell_section", 'FixedLine', nX, nY1, nWidth, nHeight, Label = dUI.get("graphspell_section", "#err"), FontDescriptor = xFDTitle)
        self.xMainDic = self._addWidget('activate_main', 'CheckBox', nX, nY1+15, nWidth, nHeight, Label = dUI.get('activate_main', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088, State = True)
        self._addWidget('activate_main_descr', 'FixedText', nX+10, nY1+25, nWidth-10, nHeight*2, Label = dUI.get('activate_main_descr', "#err"), MultiLine = True)





        self.xExtendedDic = self._addWidget('activate_extended', 'CheckBox', nX, nY2+15, nWidth, nHeight, Label = dUI.get('activate_extended', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088, Enabled = False)
        self._addWidget('activate_extended_descr', 'FixedText', nX+10, nY2+25, nWidth-10, nHeight*2, Label = dUI.get('activate_extended_descr', "#err"), MultiLine = True)
        self.xCommunityDic = self._addWidget('activate_community', 'CheckBox', nX, nY3+15, nWidth, nHeight, Label = dUI.get('activate_community', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088, Enabled = False)
        self._addWidget('activate_community_descr', 'FixedText', nX+10, nY3+25, nWidth-10, nHeight*2, Label = dUI.get('activate_community_descr', "#err"), MultiLine = True)
        self.xPersonalDic = self._addWidget('activate_personal', 'CheckBox', nX, nY4+15, nWidth, nHeight, Label = dUI.get('activate_personal', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088)
        self._addWidget('activate_personal_descr', 'FixedText', nX+10, nY4+25, nWidth-10, nHeight*2, Label = dUI.get('activate_personal_descr', "#err"), MultiLine = True)
        
        # Spell suggestion engine section
        self._addWidget("suggestion_section", 'FixedLine', nX, nY5, nWidth, nHeight, Label = dUI.get("suggestion_section", "#err"), FontDescriptor = xFDTitle)
        self.xGraphspellSugg = self._addWidget('activate_spell_sugg', 'CheckBox', nX, nY5+15, nWidth, nHeight, Label = dUI.get('activate_spell_sugg', "#err"))
        self._addWidget('activate_spell_sugg_descr', 'FixedText', nX, nY5+25, nWidth, nHeight*4, Label = dUI.get('activate_spell_sugg_descr', "#err"), MultiLine = True)

        # Restart message
        self._addWidget('restart', 'FixedText', nX, nY6, nWidth, nHeight*2, Label = dUI.get('restart', "#err"), FontDescriptor = xFDTitle, MultiLine = True, TextColor = 0x880000)

        # Button
        self._addWidget('apply_button', 'Button', self.xDialog.Width-115, self.xDialog.Height-25, 50, 14, Label = dUI.get('apply_button', "#err"), FontDescriptor = xFDTitle, TextColor = 0x005500)
        self._addWidget('cancel_button', 'Button', self.xDialog.Width-60, self.xDialog.Height-25, 50, 14, Label = dUI.get('cancel_button', "#err"), FontDescriptor = xFDTitle, TextColor = 0x550000)

        self._loadOptions()

        # container
        self.xContainer = self.xSvMgr.createInstanceWithContext('com.sun.star.awt.UnoControlDialog', self.ctx)
        self.xContainer.setModel(self.xDialog)


        self.xContainer.getControl('apply_button').addActionListener(self)
        self.xContainer.getControl('apply_button').setActionCommand('Apply')
        self.xContainer.getControl('cancel_button').addActionListener(self)
        self.xContainer.getControl('cancel_button').setActionCommand('Cancel')
        self.xContainer.setVisible(False)
        toolkit = self.xSvMgr.createInstanceWithContext('com.sun.star.awt.ExtToolkit', self.ctx)
        self.xContainer.createPeer(toolkit, None)
        self.xContainer.execute()

    # XActionListener
    def actionPerformed (self, xActionEvent):
        try:
            if xActionEvent.ActionCommand == 'Apply':
                xChild = self.xSettingNode.getByName("o_fr")
                #xChild.setPropertyValue("use_graphspell", self.xGraphspell.State)
                xChild.setPropertyValue("use_graphspell_sugg", self.xGraphspellSugg.State)
                #xChild.setPropertyValue("use_extended_dic", self.xExtendedDic.State)
                #xChild.setPropertyValue("use_community_dic", self.xCommunityDic.State)
                xChild.setPropertyValue("use_personal_dic", self.xPersonalDic.State)









                self.xSettingNode.commitChanges()



            else:
                pass
            self.xContainer.endExecute()
        except:
            traceback.print_exc()
    
    # XJobExecutor
    def trigger (self, args):
        try:
            dialog = DictOptions(self.ctx)
            dialog.run()
        except:
            traceback.print_exc()

    def _loadOptions (self):
        try:
            xChild = self.xSettingNode.getByName("o_fr")
            #self.xGraphspell.State = xChild.getPropertyValue("use_graphspell")
            self.xGraphspellSugg.State = xChild.getPropertyValue("use_graphspell_sugg")
            #self.xExtendedDic.State = xChild.getPropertyValue("use_extended_dic")
            #self.xCommunityDic.State = xChild.getPropertyValue("use_community_dic")
            self.xPersonalDic.State = xChild.getPropertyValue("use_personal_dic")









        except:
            traceback.print_exc()


#g_ImplementationHelper = unohelper.ImplementationHelper()
#g_ImplementationHelper.addImplementation(DictOptions, 'net.grammalecte.graphspell.DictOptions', ('com.sun.star.task.Job',))







After (lines 44-189):
        xWidget.Height = h
        for k, w in kwargs.items():
            setattr(xWidget, k, w)
        self.xDialog.insertByName(name, xWidget)
        return xWidget

    def run (self, sLang):
        self.dUI = do_strings.getUI(sLang)

        self.xDesktop = self.xSvMgr.createInstanceWithContext("com.sun.star.frame.Desktop", self.ctx)
        self.xDocument = self.xDesktop.getCurrentComponent()
        self.xOptionNode = helpers.getConfigSetting("/org.openoffice.Lightproof_grammalecte/Other/", True)

        # dialog
        self.xDialog = self.xSvMgr.createInstanceWithContext('com.sun.star.awt.UnoControlDialogModel', self.ctx)
        self.xDialog.Width = 200
        self.xDialog.Height = 310
        self.xDialog.Title = self.dUI.get('title', "#title#")
        xWindowSize = helpers.getWindowSize()
        self.xDialog.PositionX = int((xWindowSize.Width / 2) - (self.xDialog.Width / 2))
        self.xDialog.PositionY = int((xWindowSize.Height / 2) - (self.xDialog.Height / 2))

        # fonts
        xFDTitle = uno.createUnoStruct("com.sun.star.awt.FontDescriptor")
        xFDTitle.Height = 9
        xFDTitle.Weight = uno.getConstantByName("com.sun.star.awt.FontWeight.BOLD")
        xFDTitle.Name = "Verdana"
        
        xFDSubTitle = uno.createUnoStruct("com.sun.star.awt.FontDescriptor")
        xFDSubTitle.Height = 10
        xFDSubTitle.Weight = uno.getConstantByName("com.sun.star.awt.FontWeight.BOLD")
        xFDSubTitle.Name = "Verdana"

        # widget
        nX = 10
        nY1 = 10
        nY2 = nY1 + 60
        nY3 = nY2 + 25
        nY4 = nY3 + 25
        nY5 = nY4 + 45
        nY6 = nY5 + 95

        nWidth = self.xDialog.Width - 20
        nHeight = 10






        # Graphspell dictionary section
        self._addWidget("graphspell_section", 'FixedLine', nX, nY1, nWidth, nHeight, Label = self.dUI.get("graphspell_section", "#err"), FontDescriptor = xFDTitle)
        self.xMainDic = self._addWidget('activate_main', 'CheckBox', nX, nY1+15, nWidth, nHeight, Label = self.dUI.get('activate_main', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088, State = True)
        self._addWidget('activate_main_descr', 'FixedText', nX+10, nY1+25, nWidth-10, nHeight*2, Label = self.dUI.get('activate_main_descr', "#err"), MultiLine = True)
        self._addWidget('spelling', 'FixedText', nX+10, nY1+45, nWidth-80, nHeight, Label = self.dUI.get('spelling', "#err"), FontDescriptor = xFDSubTitle)
        self.xInfoDicButton = self._addWidget('info_dic_button', 'Button', nX+160, nY1+45, 12, 9, Label = "‹i›")
        self.xSelClassic = self._addWidget('classic', 'RadioButton', nX+10, nY1+55, 50, nHeight, Label = self.dUI.get('classic', "#err"))
        self.xSelReform = self._addWidget('reform', 'RadioButton', nX+65, nY1+55, 55, nHeight, Label = self.dUI.get('reform', "#err"))
        self.xSelAllvars = self._addWidget('allvars', 'RadioButton', nX+120, nY1+55, 60, nHeight, Label = self.dUI.get('allvars', "#err"))
        self.xExtendedDic = self._addWidget('activate_extended', 'CheckBox', nX, nY2+15, nWidth, nHeight, Label = self.dUI.get('activate_extended', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088, Enabled = False)
        self._addWidget('activate_extended_descr', 'FixedText', nX+10, nY2+25, nWidth-10, nHeight*1, Label = self.dUI.get('activate_extended_descr', "#err"), MultiLine = True)
        self.xCommunityDic = self._addWidget('activate_community', 'CheckBox', nX, nY3+15, nWidth, nHeight, Label = self.dUI.get('activate_community', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088, Enabled = False)
        self._addWidget('activate_community_descr', 'FixedText', nX+10, nY3+25, nWidth-10, nHeight*1, Label = self.dUI.get('activate_community_descr', "#err"), MultiLine = True)
        self.xPersonalDic = self._addWidget('activate_personal', 'CheckBox', nX, nY4+15, nWidth, nHeight, Label = self.dUI.get('activate_personal', "#err"), FontDescriptor = xFDSubTitle, TextColor = 0x000088)
        self._addWidget('activate_personal_descr', 'FixedText', nX+10, nY4+25, nWidth-10, nHeight*1, Label = self.dUI.get('activate_personal_descr', "#err"), MultiLine = True)
        
        # Spell suggestion engine section
        self._addWidget("suggestion_section", 'FixedLine', nX, nY5, nWidth, nHeight, Label = self.dUI.get("suggestion_section", "#err"), FontDescriptor = xFDTitle)
        self.xGraphspellSugg = self._addWidget('activate_spell_sugg', 'CheckBox', nX, nY5+15, nWidth, nHeight, Label = self.dUI.get('activate_spell_sugg', "#err"))
        self._addWidget('activate_spell_sugg_descr', 'FixedText', nX, nY5+25, nWidth, nHeight*6, Label = self.dUI.get('activate_spell_sugg_descr', "#err"), MultiLine = True)

        # Restart message
        self._addWidget('restart', 'FixedText', nX, nY6, nWidth, nHeight*2, Label = self.dUI.get('restart', "#err"), FontDescriptor = xFDTitle, MultiLine = True, TextColor = 0x880000)

        # Button
        self._addWidget('apply_button', 'Button', self.xDialog.Width-115, self.xDialog.Height-20, 50, 14, Label = self.dUI.get('apply_button', "#err"), FontDescriptor = xFDTitle, TextColor = 0x005500)
        self._addWidget('cancel_button', 'Button', self.xDialog.Width-60, self.xDialog.Height-20, 50, 14, Label = self.dUI.get('cancel_button', "#err"), FontDescriptor = xFDTitle, TextColor = 0x550000)

        self._loadOptions()

        # container
        self.xContainer = self.xSvMgr.createInstanceWithContext('com.sun.star.awt.UnoControlDialog', self.ctx)
        self.xContainer.setModel(self.xDialog)
        self.xContainer.getControl('info_dic_button').addActionListener(self)
        self.xContainer.getControl('info_dic_button').setActionCommand('InfoDic')
        self.xContainer.getControl('apply_button').addActionListener(self)
        self.xContainer.getControl('apply_button').setActionCommand('Apply')
        self.xContainer.getControl('cancel_button').addActionListener(self)
        self.xContainer.getControl('cancel_button').setActionCommand('Cancel')
        self.xContainer.setVisible(False)
        toolkit = self.xSvMgr.createInstanceWithContext('com.sun.star.awt.ExtToolkit', self.ctx)
        self.xContainer.createPeer(toolkit, None)
        self.xContainer.execute()

    # XActionListener
    def actionPerformed (self, xActionEvent):
        try:
            if xActionEvent.ActionCommand == 'Apply':
                xChild = self.xOptionNode.getByName("o_fr")


                #xChild.setPropertyValue("use_extended_dic", self.xExtendedDic.State)
                #xChild.setPropertyValue("use_community_dic", self.xCommunityDic.State)
                xChild.setPropertyValue("use_personal_dic", self.xPersonalDic.State)
                xChild.setPropertyValue("use_graphspell_sugg", self.xGraphspellSugg.State)
                sMainDicName = "classic"
                if self.xSelClassic.State:
                    sMainDicName = "classic"
                elif self.xSelReform.State:
                    sMainDicName = "reform"
                elif self.xSelAllvars.State:
                    sMainDicName = "allvars"
                xChild.setPropertyValue("main_dic_name", sMainDicName)
                self.xOptionNode.commitChanges()
                self.xContainer.endExecute()
            elif xActionEvent.ActionCommand == 'InfoDic':
                MessageBox(self.xDocument, self.dUI.get('spelling_descr', "#err"), "Orthographe du français", nBoxType=INFOBOX, nBoxButtons=BUTTONS_OK)
            else:

                self.xContainer.endExecute()
        except:
            traceback.print_exc()
    
    # XJobExecutor
    def trigger (self, args):
        try:
            dialog = DictOptions(self.ctx)
            dialog.run()
        except:
            traceback.print_exc()

    def _loadOptions (self):
        try:
            xChild = self.xOptionNode.getByName("o_fr")
            #self.xGraphspell.State = xChild.getPropertyValue("use_graphspell")
            self.xGraphspellSugg.State = xChild.getPropertyValue("use_graphspell_sugg")
            #self.xExtendedDic.State = xChild.getPropertyValue("use_extended_dic")
            #self.xCommunityDic.State = xChild.getPropertyValue("use_community_dic")
            self.xPersonalDic.State = xChild.getPropertyValue("use_personal_dic")
            sMainDicName = xChild.getPropertyValue("main_dic_name")
            if sMainDicName == "classic":
                self.xSelClassic.State = 1
            elif sMainDicName == "reform":
                self.xSelReform.State = 1
            elif sMainDicName == "allvars":
                self.xSelAllvars.State = 1
            else:
                print("Error. Unknown dictionary: " + sMainDicName)
        except:
            traceback.print_exc()


#g_ImplementationHelper = unohelper.ImplementationHelper()
#g_ImplementationHelper.addImplementation(DictOptions, 'net.grammalecte.graphspell.DictOptions', ('com.sun.star.task.Job',))

Modified gc_lang/fr/oxt/DictOptions/SearchWords.py from [2ba69c4e6b] to [75f9b461cf].

Before (lines 182-196):
            elif xActionEvent.ActionCommand == "Close":
                self.xContainer.endExecute()
        except:
            traceback.print_exc()

    def initSpellChecker (self):
        if not self.oSpellChecker:
            self.oSpellChecker = sc.SpellChecker("fr", "fr.bdic", "", "", self.oPersonalDicJSON)

    @_waitPointer
    def searchSimilar (self):
        self.initSpellChecker()
        sWord = self.xWord.Text.strip()
        if sWord:
            xGridDataModel = self.xGridModel.GridDataModel







After (lines 182-196):
            elif xActionEvent.ActionCommand == "Close":
                self.xContainer.endExecute()
        except:
            traceback.print_exc()

    def initSpellChecker (self):
        if not self.oSpellChecker:
            self.oSpellChecker = sc.SpellChecker("fr", "fr-allvars.bdic", "", "", self.oPersonalDicJSON)

    @_waitPointer
    def searchSimilar (self):
        self.initSpellChecker()
        sWord = self.xWord.Text.strip()
        if sWord:
            xGridDataModel = self.xGridModel.GridDataModel

Modified gc_lang/fr/oxt/DictOptions/do_strings.py from [2814e1e1ce] to [9a9f6574c0].

Before (lines 9-59):
        
        "spelling_section": "Correcteur orthographique",
        "activate_main": "Activer le correcteur orthographique de Grammalecte",
        "activate_main_descr": "Supplante le correcteur orthographique inclus dans LibreOffice (Hunspell).",

        "suggestion_section": "Moteur de suggestion orthographique",
        "activate_spell_sugg": "Moteur de suggestion de Grammalecte",
        "activate_spell_sugg_descr": "Désactivée, cette option remplace la suggestion orthographique de Grammalecte par celle fournie par LibreOffice (Hunspell). Les mots inclus dans le dictionnaire personnalisé ne seront plus inclus aux suggestions.",

        "graphspell_section": "Dictionnaires de Grammalecte (Graphspell)",
        "activate_main": "Dictionnaire principal",
        "activate_main_descr": "Environ 83 000 entrées, 500 000 flexions.\nNi éditable, ni désactivable.",





        "activate_extended": "Dictionnaire étendu",
        "activate_extended_descr": "Fonctionnalité à venir",
        "activate_community": "Dictionnaire communautaire",
        "activate_community_descr": "Fonctionnalité à venir",
        "activate_personal": "Dictionnaire personnel",
        "activate_personal_descr": "Le dictionnaire personnel est cé et édité via l’éditeur lexical.",

        "restart": "Le changement ne prendra effet qu’après le redémarrage du logiciel.",

        "apply_button": "Appliquer",
        "cancel_button": "Annuler",
    },
    "en": {
        "title": "Grammalecte · Spelling options",
        
        "spelling_section": "Spell checker",
        "activate_main": "Activate the spell checker from Grammalecte",
        "activate_main_descr": "Overrides the spell checker included in LibreOffice (Hunspell)",

        "suggestion_section": "Spell suggestion engine",
        "activate_spell_sugg": "Suggestion engine of Grammalecte",
        "activate_spell_sugg_descr": "Disactivated, this option replace the spell suggestion engine of Grammalecte by the one of LibreOffice (Hunspell). Words included in the personal dictionary won’t be included among suggestions.",

        "graphspell_section": "Grammalecte Dictionaries (Graphspell)",
        "activate_main": "Main dictionary",
        "activate_main_descr": "About 83 000 entries, 500 000 flexions.\nNot editable, not deactivable.",





        "activate_extended": "Extended dictionary",
        "activate_extended_descr": "Feature to come.",
        "activate_community": "Community dictionary",
        "activate_community_descr": "Feature to come.",
        "activate_personal": "Personal dictionary",
        "activate_personal_descr": "The personal dictionary is created and edited via the lexicon editor.",

        "restart": "The modification will be effective only after restarting the software.",

        "apply_button": "Apply",
        "cancel_button": "Cancel",
    },
}







After (lines 9-69):
        
        "spelling_section": "Correcteur orthographique",
        "activate_main": "Activer le correcteur orthographique de Grammalecte",
        "activate_main_descr": "Supplante le correcteur orthographique inclus dans LibreOffice (Hunspell).",

        "suggestion_section": "Moteur de suggestion orthographique",
        "activate_spell_sugg": "Moteur de suggestion de Grammalecte",
        "activate_spell_sugg_descr": "Les suggestions orthographiques des mots non reconnus par le correcteur sont fournies par Grammalecte. Si ces suggestions ne vous satisfont pas (ou si c’est trop lent), vous pouvez désactiver cette option : les suggestions orthographiques seront alors fournies par le correcteur de LibreOffice. Mais, dans ce cas, les mots que vous avez ajoutés au dictionnaire personnel de Grammalecte ne pourront pas être inclus aux suggestions.",

        "graphspell_section": "Dictionnaires actifs",
        "activate_main": "Dictionnaire principal",
        "activate_main_descr": "Environ 83 000 entrées, 500 000 flexions.\nNi éditable, ni désactivable.",
        "spelling": "Orthographe",
        "spelling_descr": "Le dictionnaire “Classique” propose l’orthographe telle qu’elle est écrite aujourd’hui le plus couramment. C’est le dictionnaire recommandé. Il contient les graphies usuelles et classiques, certaines encore communément utilisées, d’autres désuètes.\n\nAvec le dictionnaire “Réforme 1990”, seule l’orthographe réformée est reconnue. Attendu que bon nombre de graphies réformées sont considérées comme erronées par beaucoup, ce dictionnaire est déconseillé. Les graphies passées dans l’usage sont déjà incluses dans le dictionnaire “Classique”.\n\nLe dictionnaire “Toutes variantes” contient toutes les graphies, classiques ou réformées, ainsi que d’autres plus rares encore. Ce dictionnaire est déconseillé à ceux qui ne connaissent pas très bien la langue française.",
        "allvars": "Toutes variantes",
        "classic": "Classique",
        "reform": "Réforme 1990",
        "activate_extended": "Dictionnaire étendu",
        "activate_extended_descr": "Fonctionnalité à venir",
        "activate_community": "Dictionnaire communautaire",
        "activate_community_descr": "Fonctionnalité à venir",
        "activate_personal": "Dictionnaire personnel",
        "activate_personal_descr": "Cable et éditable via l’éditeur lexical.",

        "restart": "Le changement ne prendra effet qu’après le redémarrage du logiciel.",

        "apply_button": "Appliquer",
        "cancel_button": "Annuler",
    },
    "en": {
        "title": "Grammalecte · Spelling options",
        
        "spelling_section": "Spell checker",
        "activate_main": "Activate the spell checker from Grammalecte",
        "activate_main_descr": "Overrides the spell checker included in LibreOffice (Hunspell)",

        "suggestion_section": "Spell suggestion engine",
        "activate_spell_sugg": "Suggestion engine of Grammalecte",
        "activate_spell_sugg_descr": "Spelling suggestions for words unrecognized by the spellchecker are provided by Grammalecte. If you aren’t satisfied by these suggestions (or if it’s too slow), you can disable this option: spelling suggestions will then be provided by the LibreOffice proofreader. In this case, words you have added in your Grammalecte’s custom dictionary won’t be included in suggestions.",

        "graphspell_section": "Active dictionaries",
        "activate_main": "Main dictionary",
        "activate_main_descr": "About 83 000 entries, 500 000 flexions.\nNot editable, not deactivable.",
        "spelling": "Spelling",
        "spelling_descr": "The dictionary “Classic” offers the French spelling as it is written nowadays most often. This is the recommended dictionary. It contains usual and classical spellings, some of them still widely used, others obsolete.\n\nWith the dictionary “Reform 1990”, only the reformed spelling is recognized. As many of reformed spellings are considered erroneous by many people, this dictionary is unadvised. Reformed spellings commonly used are already included in the “Classic” dictionary.\n\nThe dictionary “All variants” contains all spelling variants, classical and reformed, and some others even rarer. This dictionary is unadvised for those who don’t know very well the French language.",
        "allvars": "All variants",
        "classic": "Classic",
        "reform": "Reform 1990",
        "activate_extended": "Extended dictionary",
        "activate_extended_descr": "Feature to come.",
        "activate_community": "Community dictionary",
        "activate_community_descr": "Feature to come.",
        "activate_personal": "Personal dictionary",
        "activate_personal_descr": "Creatible and editable via the lexicon editor.",

        "restart": "The modification will be effective only after restarting the software.",

        "apply_button": "Apply",
        "cancel_button": "Cancel",
    },
}

Modified gc_lang/fr/oxt/Dictionnaires/dictionaries/fr-classique.aff from [34001eab6b] to [7aba573e1d].

Before (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “CLASSIQUE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






After (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “CLASSIQUE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/oxt/Dictionnaires/dictionaries/fr-moderne.aff from [e720fb281a] to [bc39751b7a].

Before (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “MODERNE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






After (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “MODERNE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/oxt/Dictionnaires/dictionaries/fr-reforme1990.aff from [e353a9ecb8] to [5fff0168b9].

Before (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “RÉFORME 1990” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






After (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “RÉFORME 1990” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/oxt/Dictionnaires/dictionaries/fr-toutesvariantes.aff from [a14cde38dd] to [4245d700cf].

Before (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “TOUTES VARIANTES” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






After (lines 1-14):
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “TOUTES VARIANTES” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/oxt/Graphspell.py from [46a0993dea] to [d8a06b777e].

Before (lines 52-75):
            self.sServiceName = "com.sun.star.linguistic2.SpellChecker"
            self.sImplementationName = "net.grammalecte.graphspell"
            self.tSupportedServiceNames = (self.sServiceName, )
            self.xSvMgr = ctx.ServiceManager
            self.locales = tuple([ Locale(t[0], t[1], t[2])  for t in lLocale ])
            self.xSettingNode = helpers.getConfigSetting("/org.openoffice.Lightproof_grammalecte/Other/", False)
            self.xOptionNode = self.xSettingNode.getByName("o_fr")

            personal_dic = ""
            if (self.xOptionNode.getPropertyValue("use_personal_dic")):
                sPersonalDicJSON = self.xOptionNode.getPropertyValue("personal_dic")
                if sPersonalDicJSON:
                    try:
                        personal_dic = json.loads(sPersonalDicJSON)
                    except:
                        print("Graphspell: wrong personal_dic")
                        traceback.print_exc()
            self.oGraphspell = SpellChecker("fr", "fr.bdic", "", "", personal_dic)
            self.loadHunspell()
            # print("Graphspell: init done")
        except:
            print("Graphspell: init failed")
            traceback.print_exc()
    
    def loadHunspell (self):







After (lines 52-76):
            self.sServiceName = "com.sun.star.linguistic2.SpellChecker"
            self.sImplementationName = "net.grammalecte.graphspell"
            self.tSupportedServiceNames = (self.sServiceName, )
            self.xSvMgr = ctx.ServiceManager
            self.locales = tuple([ Locale(t[0], t[1], t[2])  for t in lLocale ])
            self.xSettingNode = helpers.getConfigSetting("/org.openoffice.Lightproof_grammalecte/Other/", False)
            self.xOptionNode = self.xSettingNode.getByName("o_fr")
            sMainDicName = self.xOptionNode.getPropertyValue("main_dic_name")
            personal_dic = ""
            if (self.xOptionNode.getPropertyValue("use_personal_dic")):
                sPersonalDicJSON = self.xOptionNode.getPropertyValue("personal_dic")
                if sPersonalDicJSON:
                    try:
                        personal_dic = json.loads(sPersonalDicJSON)
                    except:
                        print("Graphspell: wrong personal_dic")
                        traceback.print_exc()
            self.oGraphspell = SpellChecker("fr", "fr-"+sMainDicName+".bdic", "", "", personal_dic)
            self.loadHunspell()
            # print("Graphspell: init done")
        except:
            print("Graphspell: init failed")
            traceback.print_exc()
    
    def loadHunspell (self):
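The Graphspell.py change above stops hard-coding “fr.bdic” and instead builds the dictionary filename from the “main_dic_name” option read from the extension settings. A minimal, hypothetical Python sketch of that construction (the actual option values are not shown in this check-in; “reforme1990” is borrowed from the .aff file names above):

def buildMainDicFileName (sLang, sMainDicName):
    # Assemble the .bdic filename passed to SpellChecker, mirroring the new code:
    # SpellChecker("fr", "fr-"+sMainDicName+".bdic", "", "", personal_dic)
    return sLang + "-" + sMainDicName + ".bdic"

print(buildMainDicFileName("fr", "reforme1990"))  # -> "fr-reforme1990.bdic" (assumed option value)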

Modified gc_lang/fr/oxt/TextFormatter/TextFormatter.py from [31cae4e817] to [dd252c8003].

Old (lines 122–154):
        self.space1 = self._addWidget('space1', 'CheckBox', x, y+15, nWidth, nHeight, Label = self.dUI.get('space1', "#err"), State = True)
        self.space2 = self._addWidget('space2', 'CheckBox', x, y+25, nWidth, nHeight, Label = self.dUI.get('space2', "#err"), State = True)
        self.space1_res = self._addWidget('space1_res', 'FixedText', nPosRes, y+15, 20, nHeight, Label = "", Align = 2)
        self.space2_res = self._addWidget('space2_res', 'FixedText', nPosRes, y+25, 20, nHeight, Label = "", Align = 2)
        
        # group box // non-breaking spaces
        x = 10; y = 145
        gbm3 = self._addWidget('groupbox3', 'GroupBox', x-5, y, nGroupBoxWith, 70, Label = "  " * len(self.dUI.get('nbsp', "#err")), FontDescriptor = xFD1)
        self.nbsp = self._addWidget('nbsp', 'CheckBox', x, y+2, nWidth, nHeight, Label = self.dUI.get('nbsp', "#err"), FontDescriptor = xFD1, \
                                    FontRelief = 1, TextColor = nColor, State = True)
        self.nbsp1 = self._addWidget('nbsp1', 'CheckBox', x, y+15, 85, nHeight, Label = self.dUI.get('nbsp1', "#err"), State = True)
        self.nbsp2 = self._addWidget('nbsp2', 'CheckBox', x, y+25, 85, nHeight, Label = self.dUI.get('nbsp2', "#err"), State = True)
        self.nbsp3 = self._addWidget('nbsp3', 'CheckBox', x, y+35, nWidth, nHeight, Label = self.dUI.get('nbsp3', "#err"), State = True)
        self.nbsp4 = self._addWidget('nbsp4', 'CheckBox', x, y+45, 85, nHeight, Label = self.dUI.get('nbsp4', "#err"), State = True)
        self.nbsp5 = self._addWidget('nbsp5', 'CheckBox', x, y+55, 85, nHeight, Label = self.dUI.get('nbsp5', "#err"), State = True)

        self.nnbsp1 = self._addWidget('nnbsp1', 'CheckBox', x+85, y+15, 30, nHeight, Label = self.dUI.get('nnbsp', "#err"), HelpText = self.dUI.get('nnbsp_help', "#err"), State = False)
        self.nnbsp2 = self._addWidget('nnbsp2', 'CheckBox', x+85, y+25, 30, nHeight, Label = self.dUI.get('nnbsp', "#err"), State = False)
        self.nnbsp4 = self._addWidget('nnbsp4', 'CheckBox', x+85, y+45, 30, nHeight, Label = self.dUI.get('nnbsp', "#err"), State = False)
        self.nbsp1_res = self._addWidget('nbsp1_res', 'FixedText', nPosRes, y+15, 20, nHeight, Label = "", Align = 2)
        self.nbsp2_res = self._addWidget('nbsp2_res', 'FixedText', nPosRes, y+25, 20, nHeight, Label = "", Align = 2)
        self.nbsp3_res = self._addWidget('nbsp3_res', 'FixedText', nPosRes, y+35, 20, nHeight, Label = "", Align = 2)
        self.nbsp4_res = self._addWidget('nbsp4_res', 'FixedText', nPosRes, y+45, 20, nHeight, Label = "", Align = 2)
        self.nbsp5_res = self._addWidget('nbsp5_res', 'FixedText', nPosRes, y+55, 20, nHeight, Label = "", Align = 2)

        
        # group box // deletion
        x = 10; y = 220
        gbm7 = self._addWidget('groupbox7', 'GroupBox', x-5, y, nGroupBoxWith, 50, Label = "  " * len(self.dUI.get('delete', "#err")), FontDescriptor = xFD1)
        self.delete = self._addWidget('delete', 'CheckBox', x, y+2, nWidth, nHeight, Label = self.dUI.get('delete', "#err"), FontDescriptor = xFD1, \
                                      FontRelief = 1, TextColor = nColor, State = True)
        self.delete1 = self._addWidget('delete1', 'CheckBox', x, y+15, nWidth, nHeight, Label = self.dUI.get('delete1', "#err"), State = True)
        self.delete2 = self._addWidget('delete2', 'CheckBox', x, y+25, nWidth, nHeight, Label = self.dUI.get('delete2', "#err"), State = True)
        self.delete2a = self._addWidget('delete2a', 'RadioButton', x+10, y+35, 50, nHeight, Label = self.dUI.get('delete2a', "#"))
        self.delete2b = self._addWidget('delete2b', 'RadioButton', x+60, y+35, 60, nHeight, Label = self.dUI.get('delete2b', "#"), State = True)
New (lines 122–156):
        self.space1 = self._addWidget('space1', 'CheckBox', x, y+15, nWidth, nHeight, Label = self.dUI.get('space1', "#err"), State = True)
        self.space2 = self._addWidget('space2', 'CheckBox', x, y+25, nWidth, nHeight, Label = self.dUI.get('space2', "#err"), State = True)
        self.space1_res = self._addWidget('space1_res', 'FixedText', nPosRes, y+15, 20, nHeight, Label = "", Align = 2)
        self.space2_res = self._addWidget('space2_res', 'FixedText', nPosRes, y+25, 20, nHeight, Label = "", Align = 2)
        
        # group box // non-breaking spaces
        x = 10; y = 145
        gbm3 = self._addWidget('groupbox3', 'GroupBox', x-5, y, nGroupBoxWith, 80, Label = "  " * len(self.dUI.get('nbsp', "#err")), FontDescriptor = xFD1)
        self.nbsp = self._addWidget('nbsp', 'CheckBox', x, y+2, nWidth, nHeight, Label = self.dUI.get('nbsp', "#err"), FontDescriptor = xFD1, \
                                    FontRelief = 1, TextColor = nColor, State = True)
        self.nbsp1 = self._addWidget('nbsp1', 'CheckBox', x, y+15, 85, nHeight, Label = self.dUI.get('nbsp1', "#err"), State = True)
        self.nbsp2 = self._addWidget('nbsp2', 'CheckBox', x, y+25, 85, nHeight, Label = self.dUI.get('nbsp2', "#err"), State = True)
        self.nbsp3 = self._addWidget('nbsp3', 'CheckBox', x, y+35, nWidth, nHeight, Label = self.dUI.get('nbsp3', "#err"), State = True)
        self.nbsp4 = self._addWidget('nbsp4', 'CheckBox', x, y+45, 85, nHeight, Label = self.dUI.get('nbsp4', "#err"), State = True)
        self.nbsp5 = self._addWidget('nbsp5', 'CheckBox', x, y+55, 85, nHeight, Label = self.dUI.get('nbsp5', "#err"), State = True)
        self.nbsp6 = self._addWidget('nbsp6', 'CheckBox', x, y+65, 85, nHeight, Label = self.dUI.get('nbsp6', "#err"), State = True)
        self.nnbsp1 = self._addWidget('nnbsp1', 'CheckBox', x+85, y+15, 30, nHeight, Label = self.dUI.get('nnbsp', "#err"), HelpText = self.dUI.get('nnbsp_help', "#err"), State = False)
        self.nnbsp2 = self._addWidget('nnbsp2', 'CheckBox', x+85, y+25, 30, nHeight, Label = self.dUI.get('nnbsp', "#err"), State = False)
        self.nnbsp4 = self._addWidget('nnbsp4', 'CheckBox', x+85, y+45, 30, nHeight, Label = self.dUI.get('nnbsp', "#err"), State = False)
        self.nbsp1_res = self._addWidget('nbsp1_res', 'FixedText', nPosRes, y+15, 20, nHeight, Label = "", Align = 2)
        self.nbsp2_res = self._addWidget('nbsp2_res', 'FixedText', nPosRes, y+25, 20, nHeight, Label = "", Align = 2)
        self.nbsp3_res = self._addWidget('nbsp3_res', 'FixedText', nPosRes, y+35, 20, nHeight, Label = "", Align = 2)
        self.nbsp4_res = self._addWidget('nbsp4_res', 'FixedText', nPosRes, y+45, 20, nHeight, Label = "", Align = 2)
        self.nbsp5_res = self._addWidget('nbsp5_res', 'FixedText', nPosRes, y+55, 20, nHeight, Label = "", Align = 2)
        self.nbsp6_res = self._addWidget('nbsp6_res', 'FixedText', nPosRes, y+65, 20, nHeight, Label = "", Align = 2)
        
        # group box // deletion
        x = 10; y = 230
        gbm7 = self._addWidget('groupbox7', 'GroupBox', x-5, y, nGroupBoxWith, 50, Label = "  " * len(self.dUI.get('delete', "#err")), FontDescriptor = xFD1)
        self.delete = self._addWidget('delete', 'CheckBox', x, y+2, nWidth, nHeight, Label = self.dUI.get('delete', "#err"), FontDescriptor = xFD1, \
                                      FontRelief = 1, TextColor = nColor, State = True)
        self.delete1 = self._addWidget('delete1', 'CheckBox', x, y+15, nWidth, nHeight, Label = self.dUI.get('delete1', "#err"), State = True)
        self.delete2 = self._addWidget('delete2', 'CheckBox', x, y+25, nWidth, nHeight, Label = self.dUI.get('delete2', "#err"), State = True)
        self.delete2a = self._addWidget('delete2a', 'RadioButton', x+10, y+35, 50, nHeight, Label = self.dUI.get('delete2a', "#"))
        self.delete2b = self._addWidget('delete2b', 'RadioButton', x+60, y+35, 60, nHeight, Label = self.dUI.get('delete2b', "#"), State = True)
Old (lines 223–258):
        self.struct3 = self._addWidget('struct3', 'CheckBox', x, y+35, nWidth, nHeight, Label = self.dUI.get('struct3', "#err"), \
                                       HelpText = self.dUI.get('struct3_help', "#err"), State = False, Enabled = False)
        self.struct1_res = self._addWidget('struct1_res', 'FixedText', nPosRes, y+15, 20, nHeight, Label = "", Align = 2)
        self.struct2_res = self._addWidget('struct2_res', 'FixedText', nPosRes, y+25, 20, nHeight, Label = "", Align = 2)
        self.struct3_res = self._addWidget('struct3_res', 'FixedText', nPosRes, y+35, 20, nHeight, Label = "", Align = 2)
        
        # dialog height
        self.xDialog.Height = 292
        xWindowSize = helpers.getWindowSize()
        self.xDialog.PositionX = int((xWindowSize.Width / 2) - (self.xDialog.Width / 2))
        self.xDialog.PositionY = int((xWindowSize.Height / 2) - (self.xDialog.Height / 2))

        # lists of checkbox widgets
        self.dCheckboxWidgets = {
            "ssp":      [self.ssp1, self.ssp2, self.ssp3, self.ssp4, self.ssp5, self.ssp6, self.ssp7],
            "space":    [self.space1, self.space2],
            "nbsp":     [self.nbsp1, self.nbsp2, self.nbsp3, self.nbsp4, self.nbsp5, self.nnbsp1, self.nnbsp2, self.nnbsp4],
            "delete":   [self.delete1, self.delete2, self.delete2a, self.delete2b, self.delete2c],
            "typo":     [self.typo1, self.typo2, self.typo3, self.typo3a, self.typo3b, self.typo4, self.typo4a, self.typo4b, self.typo5, self.typo6, \
                         self.typo7, self.typo8, self.typo8a, self.typo8b, self.typo_ff, self.typo_fi, self.typo_ffi, self.typo_fl, self.typo_ffl, \
                         self.typo_ft, self.typo_st],
            "misc":     [self.misc1, self.misc2, self.misc3, self.misc5, self.misc1a, self.misc5b, self.misc5c], #self.misc4, 
            "struct":   [self.struct1, self.struct2, self.struct3]
        }

        # progress bar
        self.pbar = self._addWidget('pbar', 'ProgressBar', 22, self.xDialog.Height-16, 210, 10)
        self.pbar.ProgressValueMin = 0
        self.pbar.ProgressValueMax = 31
        # time counter
        self.time_res = self._addWidget('time_res', 'FixedText', self.xDialog.Width-80, self.xDialog.Height-15, 20, nHeight, Label = "", Align = 2)

        # buttons
        self.bdefault = self._addWidget('default', 'Button', 5, self.xDialog.Height-19, 15, 15, Label = self.dUI.get('default', "#err"), \
                                        HelpText = self.dUI.get('default_help', "#err"), FontDescriptor = xFD2, TextColor = 0x444444)
        #self.bsel = self._addWidget('bsel', 'CheckBox', x, self.xDialog.Height-40, nWidth-55, nHeight, Label = self.dUI.get('bsel', "#err"))        
New (lines 225–260):
        self.struct3 = self._addWidget('struct3', 'CheckBox', x, y+35, nWidth, nHeight, Label = self.dUI.get('struct3', "#err"), \
                                       HelpText = self.dUI.get('struct3_help', "#err"), State = False, Enabled = False)
        self.struct1_res = self._addWidget('struct1_res', 'FixedText', nPosRes, y+15, 20, nHeight, Label = "", Align = 2)
        self.struct2_res = self._addWidget('struct2_res', 'FixedText', nPosRes, y+25, 20, nHeight, Label = "", Align = 2)
        self.struct3_res = self._addWidget('struct3_res', 'FixedText', nPosRes, y+35, 20, nHeight, Label = "", Align = 2)
        
        # dialog height
        self.xDialog.Height = 302
        xWindowSize = helpers.getWindowSize()
        self.xDialog.PositionX = int((xWindowSize.Width / 2) - (self.xDialog.Width / 2))
        self.xDialog.PositionY = int((xWindowSize.Height / 2) - (self.xDialog.Height / 2))

        # lists of checkbox widgets
        self.dCheckboxWidgets = {
            "ssp":      [self.ssp1, self.ssp2, self.ssp3, self.ssp4, self.ssp5, self.ssp6, self.ssp7],
            "space":    [self.space1, self.space2],
            "nbsp":     [self.nbsp1, self.nbsp2, self.nbsp3, self.nbsp4, self.nbsp5, self.nbsp6, self.nnbsp1, self.nnbsp2, self.nnbsp4],
            "delete":   [self.delete1, self.delete2, self.delete2a, self.delete2b, self.delete2c],
            "typo":     [self.typo1, self.typo2, self.typo3, self.typo3a, self.typo3b, self.typo4, self.typo4a, self.typo4b, self.typo5, self.typo6, \
                         self.typo7, self.typo8, self.typo8a, self.typo8b, self.typo_ff, self.typo_fi, self.typo_ffi, self.typo_fl, self.typo_ffl, \
                         self.typo_ft, self.typo_st],
            "misc":     [self.misc1, self.misc2, self.misc3, self.misc5, self.misc1a, self.misc5b, self.misc5c], #self.misc4, 
            "struct":   [self.struct1, self.struct2, self.struct3]
        }

        # progress bar
        self.pbar = self._addWidget('pbar', 'ProgressBar', 22, self.xDialog.Height-16, 210, 10)
        self.pbar.ProgressValueMin = 0
        self.pbar.ProgressValueMax = 32
        # time counter
        self.time_res = self._addWidget('time_res', 'FixedText', self.xDialog.Width-80, self.xDialog.Height-15, 20, nHeight, Label = "", Align = 2)

        # buttons
        self.bdefault = self._addWidget('default', 'Button', 5, self.xDialog.Height-19, 15, 15, Label = self.dUI.get('default', "#err"), \
                                        HelpText = self.dUI.get('default_help', "#err"), FontDescriptor = xFD2, TextColor = 0x444444)
        #self.bsel = self._addWidget('bsel', 'CheckBox', x, self.xDialog.Height-40, nWidth-55, nHeight, Label = self.dUI.get('bsel', "#err"))        
Old (lines 512–566):
                        n = self._replaceList(xElem, "nbsp4")
                    self.nbsp4_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.nbsp5.State:
                    n = self._replaceList(xElem, "nbsp5")
                    self.nbsp5_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if False:
                    n = self._replaceList(xElem, "nbsp6")
                    self.nbsp3_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.nbsp.State = False
                self._switchCheckBox(self.nbsp)
            self.pbar.ProgressValue = 15
            # points médians
            if self.typo.State:
                if self.typo6.State:
                    n = self._replaceList(xElem, "typo6")
                    self.typo6_res.Label = str(n)
                    self.pbar.ProgressValue += 1
            # espaces manquants
            if self.space.State:
                if self.space1.State:
                    n = self._replaceList(xElem, "space1")
                    # réparations
                    n -= self._replaceList(xElem, "space1_fix")
                    self.space1_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.space2.State:
                    n = self._replaceList(xElem, "space2")
                    self.space2_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.space.State = False
                self._switchCheckBox(self.space)
            self.pbar.ProgressValue = 17
            # Suppression
            if self.delete.State:
                if self.delete1.State:
                    n = self._replaceList(xElem, "delete1")
                    self.delete1_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.delete2.State:
                    n = self._replaceBulletsByEmDash(xElem)
                    self.delete2_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.delete.State = False
                self._switchCheckBox(self.delete)
            self.pbar.ProgressValue = 20
            # signes typographiques
            if self.typo.State:
                if self.typo1.State:
                    n = self._replaceList(xElem, "typo1")
                    self.typo1_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.typo2.State:
New (lines 514–568):
                        n = self._replaceList(xElem, "nbsp4")
                    self.nbsp4_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.nbsp5.State:
                    n = self._replaceList(xElem, "nbsp5")
                    self.nbsp5_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.nbsp6.State:
                    n = self._replaceList(xElem, "nbsp6")
                    self.nbsp6_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.nbsp.State = False
                self._switchCheckBox(self.nbsp)
            self.pbar.ProgressValue = 16
            # points médians
            if self.typo.State:
                if self.typo6.State:
                    n = self._replaceList(xElem, "typo6")
                    self.typo6_res.Label = str(n)
                    self.pbar.ProgressValue += 1
            # espaces manquants
            if self.space.State:
                if self.space1.State:
                    n = self._replaceList(xElem, "space1")
                    # réparations
                    n -= self._replaceList(xElem, "space1_fix")
                    self.space1_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.space2.State:
                    n = self._replaceList(xElem, "space2")
                    self.space2_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.space.State = False
                self._switchCheckBox(self.space)
            self.pbar.ProgressValue = 18
            # Suppression
            if self.delete.State:
                if self.delete1.State:
                    n = self._replaceList(xElem, "delete1")
                    self.delete1_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.delete2.State:
                    n = self._replaceBulletsByEmDash(xElem)
                    self.delete2_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.delete.State = False
                self._switchCheckBox(self.delete)
            self.pbar.ProgressValue = 21
            # signes typographiques
            if self.typo.State:
                if self.typo1.State:
                    n = self._replaceList(xElem, "typo1")
                    self.typo1_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                if self.typo2.State:
Old (lines 626–640):
                            n += self._replaceText(xElem, "ſt", "ft", False, True)
                        if self.typo_st.State:
                            n += self._replaceText(xElem, "st", "st", False, True)
                    self.typo8_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.typo.State = False
                self._switchCheckBox(self.typo)
            self.pbar.ProgressValue = 28
            # divers
            if self.misc.State:
                if self.misc1.State:
                    if self.misc1a.State:
                        n = self._replaceList(xElem, "misc1a")
                    else:
                        n = self._replaceList(xElem, "misc1b")
New (lines 628–642):
                            n += self._replaceText(xElem, "ſt", "ft", False, True)
                        if self.typo_st.State:
                            n += self._replaceText(xElem, "st", "st", False, True)
                    self.typo8_res.Label = str(n)
                    self.pbar.ProgressValue += 1
                self.typo.State = False
                self._switchCheckBox(self.typo)
            self.pbar.ProgressValue = 29
            # divers
            if self.misc.State:
                if self.misc1.State:
                    if self.misc1a.State:
                        n = self._replaceList(xElem, "misc1a")
                    else:
                        n = self._replaceList(xElem, "misc1b")

Modified gc_lang/fr/oxt/TextFormatter/tf_options.py from [2ffb289615] to [9f2476057b].

Old (lines 1–18):
dDefaultOpt = {
    'ssp': 1, 'ssp1': 1, 'ssp2': 1, 'ssp3': 1, 'ssp4': 1, 'ssp5': 1, 'ssp6': 1, 'ssp7': 1, 
    'nbsp': 1, 'nbsp1': 1, 'nbsp2': 1, 'nbsp3': 1, 'nbsp4': 1, 'nbsp5': 1, 'nnbsp1': 0, 'nnbsp2': 0, 'nnbsp4': 0,
    'space': 1, 'space1': 1, 'space2': 1,
    'delete': 0, 'delete1': 1, 'delete2': 1, 'delete2a': 0, 'delete2b': 1, 'delete2c': 0,
    'typo': 1, 'typo1': 1, 'typo2': 1, 'typo3': 1, 'typo4': 1, 'typo5': 1, 'typo6': 1, 'typo7': 1, 'typo8': 0, 'typo3a': 0, 'typo3b': 1, 'typo4a': 1, 'typo4b': 0, 'typo8a': 0, 'typo8b': 1, 'typo_ff': 1, 'typo_fi':1, 'typo_ffi':1, 'typo_fl':1, 'typo_ffl':1, 'typo_ft':1, 'typo_st': 1,
    'misc': 1, 'misc1': 1, 'misc2': 1, 'misc3': 1, 'misc5': 1, 'misc1a': 0, 'misc5b': 0, 'misc5c': 0,
    'struct': 0, 'struct1': 1, 'struct2': 1, 'struct3': 0,
}
dOpt = {
    'ssp': 1, 'ssp1': 1, 'ssp2': 1, 'ssp3': 1, 'ssp4': 1, 'ssp5': 1, 'ssp6': 1, 'ssp7': 1, 
    'nbsp': 1, 'nbsp1': 1, 'nbsp2': 1, 'nbsp3': 1, 'nbsp4': 1, 'nbsp5': 1, 'nnbsp1': 0, 'nnbsp2': 0, 'nnbsp4': 0,
    'space': 1, 'space1': 1, 'space2': 1,
    'delete': 0, 'delete1': 1, 'delete2': 1, 'delete2a': 0, 'delete2b': 1, 'delete2c': 0,
    'typo': 1, 'typo1': 1, 'typo2': 1, 'typo3': 1, 'typo4': 1, 'typo5': 1, 'typo6': 1, 'typo7': 1, 'typo8': 0, 'typo3a': 0, 'typo3b': 1, 'typo4a': 1, 'typo4b': 0, 'typo8a': 0, 'typo8b': 1, 'typo_ff': 1, 'typo_fi':1, 'typo_ffi':1, 'typo_fl':1, 'typo_ffl':1, 'typo_ft':1, 'typo_st': 1,
    'misc': 1, 'misc1': 1, 'misc2': 1, 'misc3': 1, 'misc5': 1, 'misc1a': 0, 'misc5b': 0, 'misc5c': 0,
    'struct': 0, 'struct1': 1, 'struct2': 1, 'struct3': 0,
}
New (lines 1–18):
dDefaultOpt = {
    'ssp': 1, 'ssp1': 1, 'ssp2': 1, 'ssp3': 1, 'ssp4': 1, 'ssp5': 1, 'ssp6': 1, 'ssp7': 1, 
    'nbsp': 1, 'nbsp1': 1, 'nbsp2': 1, 'nbsp3': 1, 'nbsp4': 1, 'nbsp5': 1, 'nbsp6': 1, 'nnbsp1': 0, 'nnbsp2': 0, 'nnbsp4': 0,
    'space': 1, 'space1': 1, 'space2': 1,
    'delete': 0, 'delete1': 1, 'delete2': 1, 'delete2a': 0, 'delete2b': 1, 'delete2c': 0,
    'typo': 1, 'typo1': 1, 'typo2': 1, 'typo3': 1, 'typo4': 1, 'typo5': 1, 'typo6': 1, 'typo7': 1, 'typo8': 0, 'typo3a': 0, 'typo3b': 1, 'typo4a': 1, 'typo4b': 0, 'typo8a': 0, 'typo8b': 1, 'typo_ff': 1, 'typo_fi':1, 'typo_ffi':1, 'typo_fl':1, 'typo_ffl':1, 'typo_ft':1, 'typo_st': 1,
    'misc': 1, 'misc1': 1, 'misc2': 1, 'misc3': 1, 'misc5': 1, 'misc1a': 0, 'misc5b': 0, 'misc5c': 0,
    'struct': 0, 'struct1': 1, 'struct2': 1, 'struct3': 0,
}
dOpt = {
    'ssp': 1, 'ssp1': 1, 'ssp2': 1, 'ssp3': 1, 'ssp4': 1, 'ssp5': 1, 'ssp6': 1, 'ssp7': 1, 
    'nbsp': 1, 'nbsp1': 1, 'nbsp2': 1, 'nbsp3': 1, 'nbsp4': 1, 'nbsp5': 1, 'nbsp6': 1, 'nnbsp1': 0, 'nnbsp2': 0, 'nnbsp4': 0,
    'space': 1, 'space1': 1, 'space2': 1,
    'delete': 0, 'delete1': 1, 'delete2': 1, 'delete2a': 0, 'delete2b': 1, 'delete2c': 0,
    'typo': 1, 'typo1': 1, 'typo2': 1, 'typo3': 1, 'typo4': 1, 'typo5': 1, 'typo6': 1, 'typo7': 1, 'typo8': 0, 'typo3a': 0, 'typo3b': 1, 'typo4a': 1, 'typo4b': 0, 'typo8a': 0, 'typo8b': 1, 'typo_ff': 1, 'typo_fi':1, 'typo_ffi':1, 'typo_fl':1, 'typo_ffl':1, 'typo_ft':1, 'typo_st': 1,
    'misc': 1, 'misc1': 1, 'misc2': 1, 'misc3': 1, 'misc5': 1, 'misc1a': 0, 'misc5b': 0, 'misc5c': 0,
    'struct': 0, 'struct1': 1, 'struct2': 1, 'struct3': 0,
}

Modified gc_lang/fr/oxt/TextFormatter/tf_strings.py from [a143dd1d82] to [f6eccd2b02].

Old (lines 24–37):

        "nbsp": "Espaces ~insécables",
        "nbsp1": "Avant : ; ? et !",
        "nbsp2": "Avec les guillemets « et »",
        "nbsp3": "Avant % ‰ € $ £ ¥ ˚C",
        "nbsp4": "À l’intérieur des nombres",
        "nbsp5": "Avant les unités de mesure",

        "nnbsp": "fins",
        "nnbsp_help": "sauf avec “:”",

        "delete": "Su~ppressions",
        "delete1": "Tirets conditionnels",
        "delete2": "Puces  → tirets cadratins + style :",
        "delete2a": "Standard",
New (lines 24–38):

        "nbsp": "Espaces ~insécables",
        "nbsp1": "Avant : ; ? et !",
        "nbsp2": "Avec les guillemets « et »",
        "nbsp3": "Avant % ‰ € $ £ ¥ ˚C",
        "nbsp4": "À l’intérieur des nombres",
        "nbsp5": "Avant les unités de mesure",
        "nbsp6": "Après les titres de civilité",
        "nnbsp": "fins",
        "nnbsp_help": "sauf avec “:”",

        "delete": "Su~ppressions",
        "delete1": "Tirets conditionnels",
        "delete2": "Puces  → tirets cadratins + style :",
        "delete2a": "Standard",
Old (lines 106–119):

        "nbsp": "~Non-breaking spaces ",
        "nbsp1": "Before : ; ? and !",
        "nbsp2": "With quoting marks « and »",
        "nbsp3": "Before % ‰ € $ £ ¥ ˚C",
        "nbsp4": "Within numbers",
        "nbsp5": "Before units of measurement",

        "nnbsp": "narrow",
        "nnbsp_help": "except with “:”",

        "delete": "~Deletions",
        "delete1": "Soft hyphens",
        "delete2": "Bullets  → em-dash + style:",
        "delete2a": "Standard",
New (lines 107–121):

        "nbsp": "~Non-breaking spaces ",
        "nbsp1": "Before : ; ? and !",
        "nbsp2": "With quoting marks « and »",
        "nbsp3": "Before % ‰ € $ £ ¥ ˚C",
        "nbsp4": "Within numbers",
        "nbsp5": "Before units of measurement",
        "nbsp6": "After titles",
        "nnbsp": "narrow",
        "nnbsp_help": "except with “:”",

        "delete": "~Deletions",
        "delete1": "Soft hyphens",
        "delete2": "Bullets  → em-dash + style:",
        "delete2a": "Standard",

Modified gc_lang/fr/oxt/TextFormatter/tf_tabrep.py from [c833d6dc1f] to [aadf88372a].

Old (lines 117–131):
    "nnbsp4": [
                    ("([:digit:])[  ]([:digit:])",      "$1 $2",        True,   True)
    ],
    "nbsp5": [
                    ("(?<=[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]) ?([kcmµnd]?(?:[slgJKΩΩℓ]|m[²³]?|Wh?|Hz|dB)|[%‰]|°C)\\b", " $1", True, True)
    ],
    "nbsp6": [

                    ("\\b(MM?\\.|Mlle|Mgr) ",           "$1 ",          True,   True)


    ],

    # espaces manquants
    "space1": [
                    (";(?=[:alnum:])",                  "; ",           True,   True),
                    ("\\?(?=[A-ZÉÈÊÂÀÎ])",              "? ",           True,   True),
                    ("!(?=[:alnum:])",                  "! ",           True,   True),
New (lines 117–134):
    "nnbsp4": [
                    ("([:digit:])[  ]([:digit:])",      "$1 $2",        True,   True)
    ],
    "nbsp5": [
                    ("(?<=[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]) ?([kcmµnd]?(?:[slgJKΩΩℓ]|m[²³]?|Wh?|Hz|dB)|[%‰]|°C)\\b", " $1", True, True)
    ],
    "nbsp6": [
                    ("\\bM(mes?|ᵐᵉˢ?|grs?|ᵍʳˢ?|lles?|ˡˡᵉˢ?|rs?|ʳˢ?|M\\.) ", "M$1 ",     True,   True),
                    ("\\bD(re?s?|ʳᵉ?ˢ?) ",                                  "D$1 ",     True,   True),
                    ("\\bP(re?s?|ʳᵉ?ˢ?) ",                                  "P$1 ",     True,   True),
                    ("\\bV(ves?|ᵛᵉˢ?) ",                                    "V$1 ",     True,   True),
    ],

    # espaces manquants
    "space1": [
                    (";(?=[:alnum:])",                  "; ",           True,   True),
                    ("\\?(?=[A-ZÉÈÊÂÀÎ])",              "? ",           True,   True),
                    ("!(?=[:alnum:])",                  "! ",           True,   True),

Modified gc_lang/fr/rules.grx from [baa55e6e2a] to [58d908ee5a].

Old (lines 474–488):
# crochets
__[s](p_points_suspension_entre_crochets)__
    \[…\] <<- ~>> *
__[s](p_mot_entre_crochets)__
    \[({w_1})\] @@1
    <<- \1.isdigit() ~>> *
    <<- __else__ and morph(\1, ":G", False) ~>> =" " + \1 + " "
    <<- __else__ ~>> _
__[s](points_suspension_entre_parenthèses)__
    \(…\)
    <<- ->> […]                 # Pour indiquer une troncature de texte, on utilise usuellement des crochets.
    <<- ~>> *

# Divers
__[i](p_FranceTV)__
New (lines 474–488):
# crochets
__[s](p_points_suspension_entre_crochets)__
    \[…\] <<- ~>> *
__[s](p_mot_entre_crochets)__
    \[({w_1})\] @@1
    <<- \1.isdigit() ~>> *
    <<- __else__ and morph(\1, ":G", False) ~>> =" " + \1 + " "
    <<- __else__ and \1.isalpha() ~>> _
__[s](points_suspension_entre_parenthèses)__
    \(…\)
    <<- ->> […]                 # Pour indiquer une troncature de texte, on utilise usuellement des crochets.
    <<- ~>> *

# Divers
__[i](p_FranceTV)__
Old (lines 510–523):
TEST: Si l’on peut comprendre que Mme S. ait voulu être prise au sérieux
TEST: C’est le b.a.-ba du métier.
TEST: qui a été le plus honnête [Rires]
TEST: Marion Maréchal-Le Pen. Afin que Maréchal ne soit pas analysé comme un impératif, “Le Pen” devient “Le_Pen”.
TEST: Car [je] deviendrai plus insaisissable que jamais.
TEST: C’est dans le dossier D:\Data
TEST: Dossier C:\Program Files (x86)\LibreOffice




!!!
!!!
!!! Processeur: balises HTML et LaTeX                                                               
!!!
New (lines 510–524):
TEST: Si l’on peut comprendre que Mme S. ait voulu être prise au sérieux
TEST: C’est le b.a.-ba du métier.
TEST: qui a été le plus honnête [Rires]
TEST: Marion Maréchal-Le Pen. Afin que Maréchal ne soit pas analysé comme un impératif, “Le Pen” devient “Le_Pen”.
TEST: Car [je] deviendrai plus insaisissable que jamais.
TEST: C’est dans le dossier D:\Data
TEST: Dossier C:\Program Files (x86)\LibreOffice
TEST: [1] Dossier à revoir.



!!!
!!!
!!! Processeur: balises HTML et LaTeX                                                               
!!!
Old (lines 1651–1665):
__[i]/tu(tu_arc_en_ciel)__                  arcs? en ciel <<- ->> =\0.replace(" ", "-")             # Il manque les traits d’union.
__[i]/tu(tu_après_demain)__                 après demain <<- ->> après-demain                       # Il manque un trait d’union.
__[i]/tu(tu_au_préposition)__               au (delà|dehors|desso?us|devant) @@$ <<- ->> au-\1      # Il manque un trait d’union.
__[i]/tu(tu_avant_hier)__                   avant hier <<- ->> avant-hier                           # Il manque un trait d’union.
__[i]/tu(tu_bouche_à_oreille_bouche)__      bouche à (?:bouche|oreilles?) <<- morph(word(-1), ":D", False) ->> =\0.replace(" ", "-") # Il manque les traits d’union.
__[i]/tu(tu_c_est_à_dire)__                 c’est [àa] dire <<- ->> c’est-à-dire                    # Il manque les traits d’union.
__[i]/tu(tu_chef_d_œuvre_lieu)__            chef (lieu|d’œuvre) @@$ <<- ->> chef-\1                 # Il manque un trait d’union.
__[i]/tu(tu_celui_celle_là_ci)__    ce(?:lles?|lui|ux) (?:là|[cs]i) <<- ->> =\0.replace(" ", "-").replace("si", "ci")     # Il manque un trait d’union.



__[i]/tu(tu_centre_ville)__                 centres? villes? <<- ->> centre-ville|centres-villes    # Il manque un trait d’union.
__[i]/tu(tu_ci_dessous_devant_contre)__     ci (desso?us|devant|contre) @@$ <<- ->> ci-\1           # Il manque un trait d’union.
__[i]/tu(tu_de_ci_de_là)__                  de ci,? de là <<- ->> de-ci de-là|de-ci, de-là          # Il manque les traits d’union.
__[i]/tu(tu_en_contre_bas_partie)__     en contre (bas|partie) @@$ <<- ->> en contre\1|en contre-\1 # Mettez un trait d’union ou soudez.
__[i]/tu(tu_en_contrepoint)__           en (contre[- ]point) @@$ <<- -1>> contrepoint               # Soudez.
__[i]/tu(tu_état_major)__               état major <<- ->> état-major                               # Il manque un trait d’union.
__[i]/tu(tu_grand_chose)__              grand chose <<- ->> grand-chose                             # Il manque un trait d’union.
New (lines 1652–1669):
__[i]/tu(tu_arc_en_ciel)__                  arcs? en ciel <<- ->> =\0.replace(" ", "-")             # Il manque les traits d’union.
__[i]/tu(tu_après_demain)__                 après demain <<- ->> après-demain                       # Il manque un trait d’union.
__[i]/tu(tu_au_préposition)__               au (delà|dehors|desso?us|devant) @@$ <<- ->> au-\1      # Il manque un trait d’union.
__[i]/tu(tu_avant_hier)__                   avant hier <<- ->> avant-hier                           # Il manque un trait d’union.
__[i]/tu(tu_bouche_à_oreille_bouche)__      bouche à (?:bouche|oreilles?) <<- morph(word(-1), ":D", False) ->> =\0.replace(" ", "-") # Il manque les traits d’union.
__[i]/tu(tu_c_est_à_dire)__                 c’est [àa] dire <<- ->> c’est-à-dire                    # Il manque les traits d’union.
__[i]/tu(tu_chef_d_œuvre_lieu)__            chef (lieu|d’œuvre) @@$ <<- ->> chef-\1                 # Il manque un trait d’union.
__[i]/tu(tu_celui_celle_là_ci)__
    ce(?:lles?|lui|ux) (?:là|[cs]i)
    <<- not (\0.endswith("si") and morph(word(1), ":[AW]", False))
    ->> =\0.replace(" ", "-").replace("si", "ci")                                                   # Il manque un trait d’union.
__[i]/tu(tu_centre_ville)__                 centres? villes? <<- ->> centre-ville|centres-villes    # Il manque un trait d’union.
__[i]/tu(tu_ci_dessous_devant_contre)__     ci (desso?us|devant|contre) @@$ <<- ->> ci-\1           # Il manque un trait d’union.
__[i]/tu(tu_de_ci_de_là)__                  de ci,? de là <<- ->> de-ci de-là|de-ci, de-là          # Il manque les traits d’union.
__[i]/tu(tu_en_contre_bas_partie)__     en contre (bas|partie) @@$ <<- ->> en contre\1|en contre-\1 # Mettez un trait d’union ou soudez.
__[i]/tu(tu_en_contrepoint)__           en (contre[- ]point) @@$ <<- -1>> contrepoint               # Soudez.
__[i]/tu(tu_état_major)__               état major <<- ->> état-major                               # Il manque un trait d’union.
__[i]/tu(tu_grand_chose)__              grand chose <<- ->> grand-chose                             # Il manque un trait d’union.
Old (lines 1737–1750):
TEST: Il a été nommé {{vice président}}
TEST: Que vas-tu faire {{vis à vis}} d’eux              ->> vis-à-vis
TEST: un super {{week end}}                             ->> week-end
TEST: ils sont partis {{outre mer}}
TEST: elles sont allées au {{sud ouest}}
TEST: {{nord est}}
TEST: des {{stock options}}



# est-ce … ?
__[i]/tu(tu_est_ce)__
    (?<![cCdDlL][’'])(est ce) ({w_2})  @@0,$
    <<- morphex(\2, ":", ":N.*:[me]:[si]|>qui ") and morph(word(-1), ":Cs", False, True)
    -1>> est-ce                                                                                     # S’il s’agit d’une interrogation, il manque un trait d’union.
New (lines 1741–1755):
TEST: Il a été nommé {{vice président}}
TEST: Que vas-tu faire {{vis à vis}} d’eux              ->> vis-à-vis
TEST: un super {{week end}}                             ->> week-end
TEST: ils sont partis {{outre mer}}
TEST: elles sont allées au {{sud ouest}}
TEST: {{nord est}}
TEST: des {{stock options}}
TEST: Un autre chantier important, celui si sensible de la préservation des données personnelles des élèves


# est-ce … ?
__[i]/tu(tu_est_ce)__
    (?<![cCdDlL][’'])(est ce) ({w_2})  @@0,$
    <<- morphex(\2, ":", ":N.*:[me]:[si]|>qui ") and morph(word(-1), ":Cs", False, True)
    -1>> est-ce                                                                                     # S’il s’agit d’une interrogation, il manque un trait d’union.
Old (lines 1967–1984):
TEST: A bientôt fini son devoir.
TEST: A priori, nul ne peut y parvenir sans une aide extérieure.
TEST: A devient notre meilleure chance d’y parvenir.


!!!! Accentuation des majuscules                                                                    

__[i]/maj(maj_accents)__
    E(?:tat|glise|co(?:le|nomie)|quipe|té)s? @@1
    <<- ->> ="É"+\0[0:1]                                                                            # Accentuez les majuscules.
    <<- ~>> ="É"+\0[0:1]

TEST: Le budget de l’{{Etat}}.



!!!
!!!
New (lines 1972–1989):
TEST: A bientôt fini son devoir.
TEST: A priori, nul ne peut y parvenir sans une aide extérieure.
TEST: A devient notre meilleure chance d’y parvenir.


!!!! Accentuation des majuscules                                                                    

__[u]/maj(maj_accents)__
    E(?:tat|glise|co(?:le|nomie)|quipe|lectri(?:cité|que)|gal(?:ité|ement)|té)s? @@1
    <<- ->> ="É"+\0[1:]                                                                            # Accentuez les majuscules.
    <<- ~>> ="É"+\0[1:]

TEST: Le budget de l’{{Etat}}.



!!!
!!!
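In the maj_accents change above, the suggestion and preprocessing actions switch from ="É"+\0[0:1] to ="É"+\0[1:]. Assuming \0 holds the whole matched word (e.g. “Etat”), the old slice kept only its first letter, while the new one drops that letter and keeps the rest; a plain-Python illustration of the two slices:

sWord = "Etat"           # example match; \0 in the rule is assumed to be the full match
print("É" + sWord[0:1])  # "ÉE"   -- old slice: only the initial letter follows the accented É
print("É" + sWord[1:])   # "État" -- new slice: the rest of the word follows the accented É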
Old (lines 2470–2487):
__[i]/ocr(ocr_même3)__      mémos? <<- ->> =\0.replace("é", "ê").replace("É", "Ê")                  # Erreur de numérisation ?

TEST: __ocr__ J’en ai assez {{môme}} si ça ne se voit pas.
TEST: __ocr__ Ces {{mèmes}} hommes qui nous ont lâchés au pire moment
TEST: __ocr__ C’est l’incessant retour du {{mémo}}.


# mot / mol
__[i]/ocr(ocr_mot)__
    mols?
    <<- ->> =\0.replace("l", "t").replace("L", "T")                                                 # Erreur de numérisation ?

TEST: __ocr__ c’est un {{mol}} très dur.


# mon / won
__[i]/ocr(ocr_mon)__        won <<- ->> mon                                                         # Erreur de numérisation ?

New (lines 2475–2492):
__[i]/ocr(ocr_même3)__      mémos? <<- ->> =\0.replace("é", "ê").replace("É", "Ê")                  # Erreur de numérisation ?

TEST: __ocr__ J’en ai assez {{môme}} si ça ne se voit pas.
TEST: __ocr__ Ces {{mèmes}} hommes qui nous ont lâchés au pire moment
TEST: __ocr__ C’est l’incessant retour du {{mémo}}.


# mot / mol / moi
__[i]/ocr(ocr_mot)__
    mols?
    <<- ->> =\0.replace("l", "t").replace("L", "T")+"|"+\0.replace("l", "i").replace("L", "I")      # Erreur de numérisation ?

TEST: __ocr__ c’est un {{mol}} très dur.


# mon / won
__[i]/ocr(ocr_mon)__        won <<- ->> mon                                                         # Erreur de numérisation ?

Old (lines 4273–4287):

TEST: {{qu’elle}} emmerdeuse.


__[i]/conf(conf_qu_elle_verbe)__
    (quelles?) +({w_1})  @@0,$
    <<- \2.islower() and (morphex(\2, ":V|>(?:ne?|me?|te?|se?|[nv]ous|l(?:e|a|es|ui|leur|)|en|y) ", ":[NA].*:[fe]|>(?:plus|moins)") or \2 == "t" or \2 == "s")
        and not (\2 == "en" and morph(word(1), ":V0e", False)) >>>
    <<- \1.endswith("e") and not morph(\2, ":V0e", False) -1>> qu’elle                              # Confusion. Le sujet “elle” doit être séparée de la conjonction “que”. 1
    <<- __else__ and \1.endswith("s") and not morph(\2, ":V0e", False) -1>> qu’elles                # Confusion. Le sujet “elles” doit être séparée de la conjonction “que”. 2
    <<- __else__ and morph(\2, ":V0e", False) and morphex(word(1), ":[QA]", ":G", False) >>>
    <<- \1.endswith("e") -1>> qu’elle                                                               # Confusion. Le sujet “elle” doit être séparée de la conjonction “que”. 3
    <<- __else__ and \1.endswith("s") -1>> qu’elles                                                 # Confusion. Le sujet “elles” doit être séparée de la conjonction “que”. 4

TEST: Je sais {{quelle}} est partie.
New (lines 4278–4292):

TEST: {{qu’elle}} emmerdeuse.


__[i]/conf(conf_qu_elle_verbe)__
    (quelles?) +({w_1})  @@0,$
    <<- \2.islower() and (morphex(\2, ":V|>(?:ne?|me?|te?|se?|[nv]ous|l(?:e|a|es|ui|leur|)|en|y) ", ":[NA].*:[fe]|>(?:plus|moins)") or \2 == "t" or \2 == "s")
        and not (morph(\2, ">(?:pouvoir|devoir|en)", False) and morph(word(1), ":V0e", False)) >>>
    <<- \1.endswith("e") and not morph(\2, ":V0e", False) -1>> qu’elle                              # Confusion. Le sujet “elle” doit être séparée de la conjonction “que”. 1
    <<- __else__ and \1.endswith("s") and not morph(\2, ":V0e", False) -1>> qu’elles                # Confusion. Le sujet “elles” doit être séparée de la conjonction “que”. 2
    <<- __else__ and morph(\2, ":V0e", False) and morphex(word(1), ":[QA]", ":G", False) >>>
    <<- \1.endswith("e") -1>> qu’elle                                                               # Confusion. Le sujet “elle” doit être séparée de la conjonction “que”. 3
    <<- __else__ and \1.endswith("s") -1>> qu’elles                                                 # Confusion. Le sujet “elles” doit être séparée de la conjonction “que”. 4

TEST: Je sais {{quelle}} est partie.
Old (lines 4297–4310):
TEST: {{Quelles}} m’engueulent encore une seule fois et elles vont le regretter.
TEST: Je crois {{quelle}} est partie.
TEST: il pense {{quelles}} sont devenues dangereuses.
TEST: Quelle est sa passion ?
TEST: Quelles sont leurs principales études ?
TEST: Quelles en sont les conséquences ?
TEST: Quelle plus belle complicité que…




# savoir / ignorer
__[i]/conf(être_pas_sans_savoir)__
    ({etre}) pas sans (ignor(?:e[rz]|ée?s?|ai[st]))  @@0,$
    <<- morph(\1, ":V0e", False)
    -2>> savoir     # Confusion : vous écrivez l’inverse de ce que vous voulez dire.|http://fr.wiktionary.org/wiki/vous_n%E2%80%99%C3%AAtes_pas_sans_savoir
New (lines 4302–4317):
TEST: {{Quelles}} m’engueulent encore une seule fois et elles vont le regretter.
TEST: Je crois {{quelle}} est partie.
TEST: il pense {{quelles}} sont devenues dangereuses.
TEST: Quelle est sa passion ?
TEST: Quelles sont leurs principales études ?
TEST: Quelles en sont les conséquences ?
TEST: Quelle plus belle complicité que…
TEST: Quelle peut être la date de clôture d’un exercice ?
TEST: Quelle doit être la date du mariage ?


# savoir / ignorer
__[i]/conf(être_pas_sans_savoir)__
    ({etre}) pas sans (ignor(?:e[rz]|ée?s?|ai[st]))  @@0,$
    <<- morph(\1, ":V0e", False)
    -2>> savoir     # Confusion : vous écrivez l’inverse de ce que vous voulez dire.|http://fr.wiktionary.org/wiki/vous_n%E2%80%99%C3%AAtes_pas_sans_savoir
Old (lines 5064–5078):
__[i](p_quelle_qu_en_soit_la_qqch)__        quelle qu en soit la (?:cause|raison) <<- ~>> *
__[i](p_quelque_qqch)__         quelque(?: (?:part|temps)|s fois) <<- ~>> *
__[i](p_quelques_tps_adv)__     quelques (?:instants|secondes|minutes|heures|jours|semaines|mois|années|décennies|siècles|millénaires|trimestres|semestres) (?:auparavant|plus (?:tard|tôt)) <<- ~>> *
__[i](p_qui_plus_est)__         qui plus est <<- ~>> *
__[i](p_qui_loc_tps)__          qui (ce (?:jour|matin|après-midi|soir)-là|cette (?:nuit|matinée|soirée)-là) @@4 <<- ~1>> *
__[i](p_quoi_qu_il_qqch)__      quoi qu il (?:(?:arriv|en co[ûu]t)(?:e|ât)|adv(?:ienne|înt)) <<- ~>> *
__[i](p_sans_difficulté)__      sans (?:grande|grosse) difficulté(?: apparente| aucune| financière| majeure| particulière|) <<- ~>> *
__[i](p_sans_qqch)__            sans (?:ambages|arrêt|au(?:cun doute|tre forme de procès)|cesse|commune mesure|conteste|coup férir|crier gare|difficulté(?: apparente| aucune| financière| majeure| particulière|)|dire mot|doute|encombres?|états d’âme|fin|foi,? ni loi|l’ombre d’un doute|le (?:faire exprès|vouloir)|mot dire|nul doute|queue ni tête|raison apparente|relâche|répit|(?:grand |)succès|trêve|(?:pour autant |)y (?:prendre g(?:arde|o[ûu]t)|faire attention|parvenir|réussir|réfléchir|songer|penser)|faire de vagues|s’en (?:rendre compte|apercevoir)|l’aide de personne) <<- ~>> *
__[i](p_séance_tenante)__       séance tenante <<- ~>> *
__[i](p_selon_qqch)__           selon (?:toute vraisemblance|(?:[mt]oi|lui|elles?|eux|nous|vous)(?! qui)) <<- ~>> *
__[i](p_semble_t_il)__          sembl(?:e-t-il|ait-il) <<- ~>> *
__[i](p_sens_dessus_dessous)__  sens dessus dessous <<- ~>> *
__[i](p_seul_à_seul)__          seule?s? à seule?s? <<- ~>> *
__[i](p_stp_svp)__              s’il (?:te|vous) pla[îi]t <<- ~>> *
__[i](p_si_qqch)__              si (?:bas|besoin est|haut|longtemps|nécessaire|possible|soudain|(?:cela|ça) ne tenait qu à (?:moi|toi|lui|eux|elles?|nous|vous)) <<- ~>> *
New (lines 5071–5085):
__[i](p_quelle_qu_en_soit_la_qqch)__        quelle qu en soit la (?:cause|raison) <<- ~>> *
__[i](p_quelque_qqch)__         quelque(?: (?:part|temps)|s fois) <<- ~>> *
__[i](p_quelques_tps_adv)__     quelques (?:instants|secondes|minutes|heures|jours|semaines|mois|années|décennies|siècles|millénaires|trimestres|semestres) (?:auparavant|plus (?:tard|tôt)) <<- ~>> *
__[i](p_qui_plus_est)__         qui plus est <<- ~>> *
__[i](p_qui_loc_tps)__          qui (ce (?:jour|matin|après-midi|soir)-là|cette (?:nuit|matinée|soirée)-là) @@4 <<- ~1>> *
__[i](p_quoi_qu_il_qqch)__      quoi qu il (?:(?:arriv|en co[ûu]t)(?:e|ât)|adv(?:ienne|înt)) <<- ~>> *
__[i](p_sans_difficulté)__      sans (?:grande|grosse) difficulté(?: apparente| aucune| financière| majeure| particulière|) <<- ~>> *
__[i](p_sans_qqch)__            sans (?:ambages|arrêt|au(?:cun doute|tre forme de procès)|cesse|commune mesure|conteste|coup férir|crier gare|difficulté(?: apparente| aucune| financière| majeure| particulière|)|dire mot|doute|encombres?|états d’âme|fin|foi,? ni loi|l’ombre d’un doute|le (?:faire exprès|vouloir)|mot dire|nul doute|queue ni tête|raison apparente|relâche|répit|(?:grand |)succès|trêve|vergogne|(?:pour autant |)y (?:prendre g(?:arde|o[ûu]t)|faire attention|parvenir|réussir|réfléchir|songer|penser)|faire de vagues|s’en (?:rendre compte|apercevoir)|l’aide de personne) <<- ~>> *
__[i](p_séance_tenante)__       séance tenante <<- ~>> *
__[i](p_selon_qqch)__           selon (?:toute vraisemblance|(?:[mt]oi|lui|elles?|eux|nous|vous)(?! qui)) <<- ~>> *
__[i](p_semble_t_il)__          sembl(?:e-t-il|ait-il) <<- ~>> *
__[i](p_sens_dessus_dessous)__  sens dessus dessous <<- ~>> *
__[i](p_seul_à_seul)__          seule?s? à seule?s? <<- ~>> *
__[i](p_stp_svp)__              s’il (?:te|vous) pla[îi]t <<- ~>> *
__[i](p_si_qqch)__              si (?:bas|besoin est|haut|longtemps|nécessaire|possible|soudain|(?:cela|ça) ne tenait qu à (?:moi|toi|lui|eux|elles?|nous|vous)) <<- ~>> *
Old (lines 6884–6912):
__[i]/gn(gn_2m_les)__
    (?<!et |ou )(les) +({w_2}) +({w_2})  @@0,w,$
    <<- morph(\1, ":D", False) >>>
    <<- \2 != "fois" and not \3.startswith("seul")
        and ((morphex(\2, ":[NAQ].*:m", ":(?:B|e|G|V0|f)") and morph(\3, ":[NAQ].*:f")) or (morphex(\2, ":[NAQ].*:f", ":(?:B|e|G|V0|m)") and morph(\3, ":[NAQ].*:m")))
        and not apposition(\2, \3) and not before(r"\b(?:et|ou|de) +$")
    -3>> =switchGender(@, True)                                                                     # Accord de genre erroné entre « \2 » et « \3 ».
    <<- __also__ and hasFemForm(\2) -1>> =switchGender(@, True)                                     # Accord de genre erroné avec « \3 ».
    <<- \2 != "fois" and not \3.startswith("seul")
        and morph(\2, ":[NAQ].*:[pi]", False) and morph(\3, ":[NAQ].*:s")
        and not apposition(\2, \3) and not (after_chk1(r"^ +et +(\w[\w-]+)", ":A") or after_chk1(r"^ *, +(\w[\w-]+)", ":A.*:[si]"))
        and not before(r"(?i)\bune? de ")
    -3>> =suggPlur(@)                                                                               # Accord de nombre erroné avec « \2 » : « \3 » devrait être au pluriel.
__[i]/gn(gn_2m_les_après_et_ou_de)__
    (?:et|ou) +(les) +({w_2}) +({w_2})  @@w,w,$
    <<- morph(\1, ":D", False) >>>
    <<- \2 != "fois" and not \3.startswith("seul")
        and ((morphex(\2, ":[NAQ].*:m", ":(?:B|e|G|V0|f)") and morph(\3, ":[NAQ].*:f")) or (morphex(\2, ":[NAQ].*:f", ":(?:B|e|G|V0|m)") and morph(\3, ":[NAQ].*:m")))
        and not apposition(\2, \3)
        and not morph(word(-1), ":[NAQ]|>(?:et|ou) ", False, False)
    -3>> =switchGender(@, True)                                                                     # Accord de genre erroné entre « \2 » et « \3 ».
    <<- __also__ and hasFemForm(\2) -1>> =switchGender(@, True)                                     # Accord de genre erroné avec « \3 ».
    <<- \2 != "fois" and not \3.startswith("seul")
        and morph(\2, ":[NAQ].*:[pi]", False) and morph(\3, ":[NAQ].*:s")
        and not apposition(\2, \3) and not (after_chk1(r"^ +et +(\w[\w-]+)", ":A") or after_chk1(r"^ *, +(\w[\w-]+)", ":A.*:[si]"))
        and not ( before(r"(?i)\bune? de ") or (\0.startswith("de") and before(r"(?i)\bune? +$")) )
    -3>> =suggPlur(@)                                                                               # Accord de nombre erroné avec « \2 » : « \3 » devrait être au pluriel.

TEST: les pitres {{imbécile}}
New (lines 6891–6919):
__[i]/gn(gn_2m_les)__
    (?<!et |ou )(les) +({w_2}) +({w_2})  @@0,w,$
    <<- morph(\1, ":D", False) >>>
    <<- \2 != "fois" and not \3.startswith("seul")
        and ((morphex(\2, ":[NAQ].*:m", ":(?:B|e|G|V0|f)") and morph(\3, ":[NAQ].*:f")) or (morphex(\2, ":[NAQ].*:f", ":(?:B|e|G|V0|m)") and morph(\3, ":[NAQ].*:m")))
        and not apposition(\2, \3) and not before(r"\b(?:et|ou|de) +$")
    -3>> =switchGender(@, True)                                                                     # Accord de genre erroné entre « \2 » et « \3 ».
    <<- __also__ and hasFemForm(\2) -2>> =switchGender(@, True)                                     # Accord de genre erroné avec « \3 ».
    <<- \2 != "fois" and not \3.startswith("seul")
        and morph(\2, ":[NAQ].*:[pi]", False) and morph(\3, ":[NAQ].*:s")
        and not apposition(\2, \3) and not (after_chk1(r"^ +et +(\w[\w-]+)", ":A") or after_chk1(r"^ *, +(\w[\w-]+)", ":A.*:[si]"))
        and not before(r"(?i)\bune? de ")
    -3>> =suggPlur(@)                                                                               # Accord de nombre erroné avec « \2 » : « \3 » devrait être au pluriel.
__[i]/gn(gn_2m_les_après_et_ou_de)__
    (?:et|ou) +(les) +({w_2}) +({w_2})  @@w,w,$
    <<- morph(\1, ":D", False) >>>
    <<- \2 != "fois" and not \3.startswith("seul")
        and ((morphex(\2, ":[NAQ].*:m", ":(?:B|e|G|V0|f)") and morph(\3, ":[NAQ].*:f")) or (morphex(\2, ":[NAQ].*:f", ":(?:B|e|G|V0|m)") and morph(\3, ":[NAQ].*:m")))
        and not apposition(\2, \3)
        and not morph(word(-1), ":[NAQ]|>(?:et|ou) ", False, False)
    -3>> =switchGender(@, True)                                                                     # Accord de genre erroné entre « \2 » et « \3 ».
    <<- __also__ and hasFemForm(\2) -2>> =switchGender(@, True)                                     # Accord de genre erroné avec « \3 ».
    <<- \2 != "fois" and not \3.startswith("seul")
        and morph(\2, ":[NAQ].*:[pi]", False) and morph(\3, ":[NAQ].*:s")
        and not apposition(\2, \3) and not (after_chk1(r"^ +et +(\w[\w-]+)", ":A") or after_chk1(r"^ *, +(\w[\w-]+)", ":A.*:[si]"))
        and not ( before(r"(?i)\bune? de ") or (\0.startswith("de") and before(r"(?i)\bune? +$")) )
    -3>> =suggPlur(@)                                                                               # Accord de nombre erroné avec « \2 » : « \3 » devrait être au pluriel.

TEST: les pitres {{imbécile}}
Old (lines 9886–9906):
TEST: Ç’avait été {{horribles}}


__[i]/ppas(ppas_ça_verbe)__
    (ça|ce(?:la|ci)|celui-(?:ci|là)) +(?:ne +|n’|)((?:es|étai|f[uû]|se[mr]|soi|par|dev|re(?:dev|st))\w+|a(?:it|vait|ura(?:it|)|) +été|e[uû]t +été) +({w_2})  @@0,w,$
    <<- (morph(\2, ">(?:être|sembler|devenir|re(?:ster|devenir)|para[îi]tre) ", False) or \2.endswith(" été"))
        and ( morphex(\3, ":[NAQ].*:p", ":[GWYsi]") or ( morphex(\3, ":[AQ].*:f", ":[GWYme]") and not morph(word(1), ":N.*:f", False, False) ) )
        and not morph(word(-1), ":(?:R|V...t)", False, False)
    -3>> =suggMasSing(@)                                                     # Accord avec le sujet « \1 » : « \3 » devrait être au masculin singulier.

TEST: ça semble {{perdus}}
TEST: cela paraît {{incroyables}}
TEST: Je n’arrêtais pas de me répéter que tout cela était peut-être pure imagination
TEST: La femme qui faisait ça est partie.



__[i]/ppas(ppas_lequel_verbe)__
    (lequel) +(?:ne +|n’|)((?:es|étai|f[uû]|se[mr]|soi|par|dev|re(?:dev|st))\w+|a(?:it|vait|ura(?:it|)|) +été|e[uû]t +été) +({w_2})  @@0,w,$
    <<- (morph(\2, ">(?:être|sembler|devenir|re(?:ster|devenir)|para[îi]tre) ", False) or \2.endswith(" été"))
        and ( morphex(\3, ":[NAQ].*:p", ":[GWYsi]") or ( morphex(\3, ":[AQ].*:f", ":[GWYme]") and not morph(word(1), ":N.*:f", False, False) ) )
        and not morph(word(-1), ":R", False, False)
New (lines 9893–9914):
TEST: Ç’avait été {{horribles}}


__[i]/ppas(ppas_ça_verbe)__
    (ça|ce(?:la|ci)|celui-(?:ci|là)) +(?:ne +|n’|)((?:es|étai|f[uû]|se[mr]|soi|par|dev|re(?:dev|st))\w+|a(?:it|vait|ura(?:it|)|) +été|e[uû]t +été) +({w_2})  @@0,w,$
    <<- (morph(\2, ">(?:être|sembler|devenir|re(?:ster|devenir)|para[îi]tre) ", False) or \2.endswith(" été"))
        and ( morphex(\3, ":[NAQ].*:p", ":[GWYsi]") or ( morphex(\3, ":[AQ].*:f", ":[GWYme]") and not morph(word(1), ":N.*:f", False, False) ) )
        and not morph(word(-1), ":(?:R|V...t)|>de ", False, False)
    -3>> =suggMasSing(@)                                                     # Accord avec le sujet « \1 » : « \3 » devrait être au masculin singulier.

TEST: ça semble {{perdus}}
TEST: cela paraît {{incroyables}}
TEST: Je n’arrêtais pas de me répéter que tout cela était peut-être pure imagination
TEST: La femme qui faisait ça est partie.
TEST: De cela a toujours été faite notre vie


__[i]/ppas(ppas_lequel_verbe)__
    (lequel) +(?:ne +|n’|)((?:es|étai|f[uû]|se[mr]|soi|par|dev|re(?:dev|st))\w+|a(?:it|vait|ura(?:it|)|) +été|e[uû]t +été) +({w_2})  @@0,w,$
    <<- (morph(\2, ">(?:être|sembler|devenir|re(?:ster|devenir)|para[îi]tre) ", False) or \2.endswith(" été"))
        and ( morphex(\3, ":[NAQ].*:p", ":[GWYsi]") or ( morphex(\3, ":[AQ].*:f", ":[GWYme]") and not morph(word(1), ":N.*:f", False, False) ) )
        and not morph(word(-1), ":R", False, False)


TEST: j’ai peur qu’il ne leur {{face}} quelque chose de mal


#### CONFUSION ÊTRE / AVOIR
__[i]/conf(conf_ait_confiance_été_faim_tort)__
    (?<!’)(est?) +(con(?:fiance|science)|envie|été|p(?:eine|eur)|faim|h(?:âte|onte)|recours|soif|tort)  @@0,$
    <<- not before(r"(?i)\b(?:ce que?|tout) ")
    -1>> ait|aie                                                      # Confusion probable dans l’expression « avoir \2 ». Vous utilisez ici le verbe « être ».

TEST: il est posssible qu’il {{est}} confiance en toi

TEST: La révolution est crainte.
TEST: Je n’en ai cure.
TEST: Notre communauté vous est redevable.
TEST: l’humour est affaire de culture



#### CONFUSION veillez/veuillez                                                                     

__[i]/conf(conf_veillez2)__
    (veuillez) +à +(ne|{infi})  @@0,$
    <<- isStart() and morph(\2, ":Y|>ne ", False) -1>> veillez          # Confusion probable : “veuillez” est une forme conjuguée du verbe “vouloir”.|http://bdl.oqlf.gouv.qc.ca/bdl/gabarit_bdl.asp?id=1939









TEST: j’ai peur qu’il ne leur {{face}} quelque chose de mal


#### CONFUSION ÊTRE / AVOIR
__[i]/conf(conf_ait_confiance_été_faim_tort)__
    (?<!’)(est?) +(con(?:fiance|science)|envie|été|p(?:eine [àa]|eur)|faim|h(?:âte|onte)|recours|soif|tort)  @@0,$
    <<- not before(r"(?i)\b(?:ce que?|tout) ")
    -1>> ait|aie                                                      # Confusion probable dans l’expression « avoir \2 ». Vous utilisez ici le verbe « être ».

TEST: il est posssible qu’il {{est}} confiance en toi
TEST: Je crains qu’il {{est}} peine à trouver le bonheur.
TEST: La révolution est crainte.
TEST: Je n’en ai cure.
TEST: Notre communauté vous est redevable.
TEST: l’humour est affaire de culture
TEST: Aller chercher l’air pur à la campagne est peine perdue.


#### CONFUSION veillez/veuillez                                                                     

__[i]/conf(conf_veillez2)__
    (veuillez) +à +(ne|{infi})  @@0,$
    <<- isStart() and morph(\2, ":Y|>ne ", False) -1>> veillez          # Confusion probable : “veuillez” est une forme conjuguée du verbe “vouloir”.|http://bdl.oqlf.gouv.qc.ca/bdl/gabarit_bdl.asp?id=1939
    <<- morphex(\2, ":V", ":(?:G|2p|3p!|[ISK].*:2s)")
    -2>> =suggVerb(@, ":2s")                                 # Conjugaison erronée. Accord avec « \1 ». Le verbe devrait être à la 2ᵉ personne du singulier.

TEST: Tu ne {{ment}} jamais.
TEST: Tu {{a}} mal ?
TEST: Tu ne le lui {{prend}} pas.
TEST: Tu ne m’{{attendra}} pas.
TEST: toi qui n’y {{connait}} rien, ne nous ennuie pas avec tes théories.


## 3sg
__[i]/conj(conj_il)__
    (?<!t’)(il) +({w_1})  @@0,$
    <<- morphex(\2, ":V", ":(?:3s|P|G)") and not (morph(\2, ":[PQ]", False) and morph(word(-1), ":V0.*:3s", False, False))
    -2>> =suggVerb(@, ":3s")                                 # Conjugaison erronée. Accord avec « \1 ». Le verbe devrait être à la 3ᵉ personne du singulier.







    <<- morphex(\2, ":V", ":(?:G|2p|3p!|[ISK].*:2s)")
    -2>> =suggVerb(@, ":2s")                                 # Conjugaison erronée. Accord avec « \1 ». Le verbe devrait être à la 2ᵉ personne du singulier.

TEST: Tu ne {{ment}} jamais.
TEST: Tu {{a}} mal ?
TEST: Tu ne le lui {{prend}} pas.
TEST: Tu ne m’{{attendra}} pas.
TEST: toi qui n’y {{connaît}} rien, ne nous ennuie pas avec tes théories.


## 3sg
__[i]/conj(conj_il)__
    (?<!t’)(il) +({w_1})  @@0,$
    <<- morphex(\2, ":V", ":(?:3s|P|G)") and not (morph(\2, ":[PQ]", False) and morph(word(-1), ":V0.*:3s", False, False))
    -2>> =suggVerb(@, ":3s")                                 # Conjugaison erronée. Accord avec « \1 ». Le verbe devrait être à la 3ᵉ personne du singulier.


#### NOMS PROPRES

# 3sg
__[s]/conj(conj_nom_propre)__
    ([A-ZÉÈ][\w-]+) +({w_1})  @@0,$
    <<- not before(r"\b(?:et |ou |[dD][eu] |ni |[dD]e l’) *$") and morph(\1, ":M", False) and morphex(\2, ":[123][sp]", ":(?:G|3s|3p!|P|M|[AQ].*:[si])")
        and not morph(word(-1), ":[VRD]", False, False) and not before(r"([A-ZÉÈ][\w-]+), +([A-ZÉÈ][\w-]+), +$")
        and not (morph(\2, ":3p", False) and word(-1))
    -2>> =suggVerb(@, ":3s")
    # Conjugaison erronée. Accord avec « \1 ». Le verbe devrait être à la 3ᵉ personne du singulier.

TEST: Marc {{arrives}} demain.
TEST: Paul ne {{viens}} pas demain.
TEST: Marc Aurèle {{étaient}} l’empereur des Romains. (Pas de trait d’union sur Marc Aurèle.)
TEST: Arthur D. parvient à…
TEST: Les fondateurs pionniers de l’Internet n’avaient pas prévu
TEST: les compteurs Geiger ne détectent pas le polonium
TEST: Des femmes de l’administration Obama y racontent qu’elles ont constaté qu’il leur arrivait régulièrement de ne pas être invitées



# 3pl
__[s]/conj(conj_nom_propre_et_nom_propre)__
    (?<!et |ou |[dD][eu] |ni )([A-ZÉÈ][\w-]+) et ([A-ZÉÈ][\w-]+) +({w_1})  @@0,w,$
    <<- morph(\1, ":M", False) and morph(\2, ":M", False)
        and morphex(\3, ":[123][sp]", ":(?:G|3p|P|Q.*:[pi])") and not morph(word(-1), ":R", False, False)









#### NOMS PROPRES

# 3sg
__[s]/conj(conj_nom_propre)__
    ([A-ZÉÈ][\w-]+) +({w_1})  @@0,$
    <<- not before(r"\b(?:et |ou |[dD][eu] |ni |[dD]e l’) *$") and morph(\1, ":M", False) and morphex(\2, ":[123][sp]", ":(?:G|3s|3p!|P|M|[AQ].*:[si]|N.*:m:s)")
        and not morph(word(-1), ":[VRD]", False, False) and not before(r"([A-ZÉÈ][\w-]+), +([A-ZÉÈ][\w-]+), +$")
        and not (morph(\2, ":3p", False) and word(-1))
    -2>> =suggVerb(@, ":3s")
    # Conjugaison erronée. Accord avec « \1 ». Le verbe devrait être à la 3ᵉ personne du singulier.

TEST: Marc {{arrives}} demain.
TEST: Paul ne {{viens}} pas demain.
TEST: Marc Aurèle {{étaient}} l’empereur des Romains. (Pas de trait d’union sur Marc Aurèle.)
TEST: Arthur D. parvient à…
TEST: Les fondateurs pionniers de l’Internet n’avaient pas prévu
TEST: les compteurs Geiger ne détectent pas le polonium
TEST: Des femmes de l’administration Obama y racontent qu’elles ont constaté qu’il leur arrivait régulièrement de ne pas être invitées
TEST: Macron président, c’est…


# 3pl
__[s]/conj(conj_nom_propre_et_nom_propre)__
    (?<!et |ou |[dD][eu] |ni )([A-ZÉÈ][\w-]+) et ([A-ZÉÈ][\w-]+) +({w_1})  @@0,w,$
    <<- morph(\1, ":M", False) and morph(\2, ":M", False)
        and morphex(\3, ":[123][sp]", ":(?:G|3p|P|Q.*:[pi])") and not morph(word(-1), ":R", False, False)
!!
!!
!!!! Inversion verbe sujet                                                                          
!!
!!

__[i]/conj(conj_que_où_comment_verbe_sujet_sing)__
    (?:que?|où|comment|combien|dont|quand|pourquoi|l[ea]s?quel(?:le|)s?) +({w1}) (l(?:e(?:ur | )|a |’)|[mts](?:on|a) |ce(?:t|tte|) |[nv]otre |du ) *(?!plupart|majorité)({w1})  @@w,w,$
    <<- morphex(\1, ":(?:[12]s|3p)", ":(?:3s|G|W|3p!)") and not after("^ +(?:et|ou) (?:l(?:es? |a |’|eurs? )|[mts](?:a|on|es) |ce(?:tte|ts|) |[nv]o(?:s|tre) |d(?:u|es) )")
    -1>> =suggVerb(@, ":3s")                                                        # Conjugaison erronée. Accord avec « \2 \3… ». Le verbe devrait être à la 3ᵉ personne du singulier.

TEST: les possibilités qu’{{offrent}} le chien
TEST: les possibilités que {{donnent}} le chien.
TEST: Où {{vas}} l’homme ?
TEST: l’histoire dont {{bénéficient}} notre langue.
TEST: de la même façon que survivent le swing ou le latin.
TEST: en proportion des efforts que requièrent son entretien et son exploitation
TEST: une crainte aussi que renforcent son éloignement et le protocole compliqué
TEST: à l’époque de Mansur que grandissent la place et le rôle de…
TEST: où vivent la majorité des immigrés et des enfants d’immigrés











__[i]/conj(conj_que_où_comment_verbe_sujet_pluriel)__
    (?:que?|où|comment|combien|dont|quand|pourquoi|l[ea]s?quel(?:le|)s?) +({w_3}) ([ldcmts]es|quelques|certaine?s|plusieurs|[nv]os|leurs) ({w1})  @@w,w,$
    <<- morphex(\1, ":[123]s", ":(?:3p|G|W)") -1>> =suggVerb(@, ":3p")              # Conjugaison erronée. Accord avec « \2 \3… ». Le verbe devrait être à la 3ᵉ personne du pluriel.

TEST: D’où {{vienne}} les hommes ?
TEST: Comment {{danse}} les femmes ?
TEST: les idioties qu’{{aime}} les hommes, j’en ai marre.
TEST: l’amour dont {{parle}} les femmes











__[i]/conj(conj_que_où_comment_verbe_sujet)__
    (?:que?|où|comment|combien|dont|quand|pourquoi|l[ea]s?quel(?:le|)s?) +({w_1}[es])(?! je| tu| vous) @@$
    <<- morphex(\1, ":[12][sp]", ":(?:G|W|3[sp]|Y|P|Q|N|M)") -1>> =suggVerb(@, ":3s")     # Conjugaison erronée. Ce verbe devrait être à la 3ᵉ personne.

TEST: Qu’est-ce que tu crois que {{deviens}} réellement, immanquablement et subitement cet homme après une telle dérouillée ?
TEST: ces sciences, ces religions, ces philosophies ne sont que contes destinés à nous donner l’illusion de la connaissance.
TEST: Le ministre de l’Intérieur a estimé que Calais avait vécu “un degré de violence jamais connu”.



#__[i]/conj(conj_que_où_verbe_sujet_3sg)__
#    (?:que?|où|comment|combien|dont|quand|pourquoi|l[ea]s?quel(?:le|)s?) +({w_1})(?! [jJ]e| [tT]u| [nNvV]ous| [eE]t )  @@$
#    <<- morphex(\1, ":[12][sp]", ":(?:G|W|N|3[sp])") -1>> =suggVerb(@, ":3s")         # Conjugaison erronée. Ce verbe devrait être à la 3ᵉ personne.

TEST: peu importe ce que tu crois du lieu où l’{{abandonnerais}} ce salopard…







!!
!!
!!!! Inversion verbe sujet                                                                          
!!
!!

__[i]/conj(conj_que_où_comment_verbe_sujet_sing)__
    (?:que?|où|comment|combien|dont|quand|pourquoi) +({w1}) (l(?:e(?:ur | )|a |’)|[mts](?:on|a) |ce(?:t|tte|) |[nv]otre |du ) *(?!plupart|majorité)({w1})  @@w,w,$
    <<- morphex(\1, ":(?:[12]s|3p)", ":(?:3s|G|W|3p!)") and not after("^ +(?:et|ou) (?:l(?:es? |a |’|eurs? )|[mts](?:a|on|es) |ce(?:tte|ts|) |[nv]o(?:s|tre) |d(?:u|es) )")
    -1>> =suggVerb(@, ":3s")                                                        # Conjugaison erronée. Accord avec « \2 \3… ». Le verbe devrait être à la 3ᵉ personne du singulier.

TEST: les possibilités qu’{{offrent}} le chien
TEST: les possibilités que {{donnent}} le chien.
TEST: Où {{vas}} l’homme ?
TEST: l’histoire dont {{bénéficient}} notre langue.
TEST: de la même façon que survivent le swing ou le latin.
TEST: en proportion des efforts que requièrent son entretien et son exploitation
TEST: une crainte aussi que renforcent son éloignement et le protocole compliqué
TEST: à l’époque de Mansur que grandissent la place et le rôle de…
TEST: où vivent la majorité des immigrés et des enfants d’immigrés


__[i]/conj(conj_lxquel_verbe_sujet_sing)__
    (?:l[ea]s?quel(?:le|)s?) +({w1}) (l(?:e(?:ur | )|a |’)|[mts](?:on|a) |ce(?:t|tte|) |[nv]otre |du ) *(?!plupart|majorité)({w1})  @@w,w,$
    <<- morphex(\1, ":(?:[12]s|3p)", ":(?:3s|G|W|3p!)") and not after("^ +(?:et|ou) (?:l(?:es? |a |’|eurs? )|[mts](?:a|on|es) |ce(?:tte|ts|) |[nv]o(?:s|tre) |d(?:u|es) )")
        and morph(word(-1), ":R", False, False)
    -1>> =suggVerb(@, ":3s")                                                        # Conjugaison erronée. Accord avec « \2 \3… ». Le verbe devrait être à la 3ᵉ personne du singulier.

TEST: une muraille contre laquelle {{venaient}} la masse armée et vociférante.


__[i]/conj(conj_que_où_comment_verbe_sujet_pluriel)__
    (?:que?|où|comment|combien|dont|quand|pourquoi) +({w_3}) ([ldcmts]es|quelques|certaine?s|plusieurs|[nv]os|leurs) ({w1})  @@w,w,$
    <<- morphex(\1, ":[123]s", ":(?:3p|G|W)") -1>> =suggVerb(@, ":3p")              # Conjugaison erronée. Accord avec « \2 \3… ». Le verbe devrait être à la 3ᵉ personne du pluriel.

TEST: D’où {{vienne}} les hommes ?
TEST: Comment {{danse}} les femmes ?
TEST: les idioties qu’{{aime}} les hommes, j’en ai marre.
TEST: l’amour dont {{parle}} les femmes


__[i]/conj(conj_lxquel_verbe_sujet_pluriel)__
    (?:l[ea]s?quel(?:le|)s?) +({w_3}) ([ldcmts]es|quelques|certaine?s|plusieurs|[nv]os|leurs) ({w1})  @@w,w,$
    <<- morphex(\1, ":[123]s", ":(?:3p|G|W)") and morph(word(-1), ":R", False, False)
    -1>> =suggVerb(@, ":3p")              # Conjugaison erronée. Accord avec « \2 \3… ». Le verbe devrait être à la 3ᵉ personne du pluriel.

TEST: les amusements pour lesquels {{vienne}} les invités.
TEST: financé par le Qatar, lequel dépense des millions de dollars pour soutenir le mouvement


__[i]/conj(conj_que_où_comment_verbe_sujet)__
    (?:que?|où|comment|combien|dont|quand|pourquoi|l[ea]s?quel(?:le|)s?) +({w_1}[es])(?! je| tu| vous) @@$
    <<- morphex(\1, ":[12][sp]", ":(?:G|W|3[sp]|Y|P|Q|N|A|M)") -1>> =suggVerb(@, ":3s")     # Conjugaison erronée. Ce verbe devrait être à la 3ᵉ personne.

TEST: Qu’est-ce que tu crois que {{deviens}} réellement, immanquablement et subitement cet homme après une telle dérouillée ?
TEST: ces sciences, ces religions, ces philosophies ne sont que contes destinés à nous donner l’illusion de la connaissance.
TEST: Le ministre de l’Intérieur a estimé que Calais avait vécu “un degré de violence jamais connu”.
TEST: à des règles aussi absurdes que précises


#__[i]/conj(conj_que_où_verbe_sujet_3sg)__
#    (?:que?|où|comment|combien|dont|quand|pourquoi|l[ea]s?quel(?:le|)s?) +({w_1})(?! [jJ]e| [tT]u| [nNvV]ous| [eE]t )  @@$
#    <<- morphex(\1, ":[12][sp]", ":(?:G|W|N|3[sp])") -1>> =suggVerb(@, ":3s")         # Conjugaison erronée. Ce verbe devrait être à la 3ᵉ personne.

TEST: peu importe ce que tu crois du lieu où l’{{abandonnerais}} ce salopard…
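
The rules in this hunk all follow the same shape: a trigger regex, a `<<-` condition evaluated on the captured tokens, and an action such as `-1>> =suggVerb(@, ":3s")` that rewrites one group, with the user-visible message after `#`. The snippet below is only a minimal Python sketch of that flow; the morphology table and the suggestion helper are invented for illustration and are not Grammalecte's real morph/morphex/suggVerb API.

import re

# Fake morphology lookup, standing in for the dictionary-backed morph()/morphex().
MORPHOLOGY = {
    "vienne": ":V:3s",      # analysed as a 3rd-person-singular verb form
    "viennent": ":V:3p",
    "hommes": ":N:m:p",
}

def morph(sWord, sPattern):
    "True if the (fake) analysis of sWord matches sPattern."
    return bool(re.search(sPattern, MORPHOLOGY.get(sWord, "")))

def suggVerb3pl(sWord):
    "Toy suggestion helper: the real suggVerb() conjugates the verb properly."
    return {"vienne": "viennent"}.get(sWord, sWord)

# In the spirit of conj_que_où_comment_verbe_sujet_pluriel:
# trigger pattern + condition on the captured verb + suggestion.
sPattern = r"(?:où|comment|dont|quand|pourquoi) +(\w+) +(?:les|des|ces) +(\w+)"

def checkSentence(sSentence):
    for m in re.finditer(sPattern, sSentence, re.IGNORECASE):
        sVerb = m.group(1)
        if morph(sVerb, ":[123]s") and not morph(sVerb, ":3p"):
            print("{} -> suggestion: {}".format(sVerb, suggVerb3pl(sVerb)))

checkSentence("D’où vienne les hommes ?")   # prints: vienne -> suggestion: viennent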

Modified gc_lang/fr/setup.py from [e665601366] to [7f38b8e2c3].

    #     'test': ['coverage'],
    # },

    # If there are data files included in your packages that need to be
    # installed, specify them here.  If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        'grammalecte': ['graphspell/_dictionaries/fr.bdic', '*.txt']
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],







    #     'test': ['coverage'],
    # },

    # If there are data files included in your packages that need to be
    # installed, specify them here.  If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        'grammalecte': ['graphspell/_dictionaries/*.bdic', '*.txt']
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # data_files=[('my_data', ['data/data_file'])],
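
The only functional change in this setup.py hunk is the package_data glob above: every compiled .bdic dictionary gets shipped instead of the single hard-coded fr.bdic. As a reminder of how setuptools resolves these patterns (relative to the package directory), here is a stripped-down sketch, not the project's actual setup.py:

from setuptools import setup

setup(
    name="grammalecte-example",     # placeholder name, for illustration only
    packages=["grammalecte"],
    # Patterns are resolved relative to the "grammalecte" package directory,
    # so "*.bdic" matches whatever compiled dictionaries are present at build time.
    package_data={
        "grammalecte": ["graphspell/_dictionaries/*.bdic", "*.txt"],
    },
)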

Modified gc_lang/fr/tb/content/overlay.js from [fefd8da4f7] to [99ae888938].

                    document.getElementById('res_o_within_quotation_marks').textContent = n1;
                }
                document.getElementById("o_group_ssp").checked = false;
                this.switchGroup("o_group_ssp");
            }
            document.getElementById('progressbar').value = 1;

            // espaces typographiques
            if (document.getElementById("o_group_nbsp").checked) {
                if (document.getElementById("o_nbsp_before_punctuation").checked) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_punctuation");
                    [sText, n2] = this.formatText(sText, "nbsp_repair");
                    document.getElementById('res_o_nbsp_before_punctuation').textContent = n1 - n2;
                }
                if (document.getElementById("o_nbsp_within_quotation_marks").checked) {







                    document.getElementById('res_o_within_quotation_marks').textContent = n1;
                }
                document.getElementById("o_group_ssp").checked = false;
                this.switchGroup("o_group_ssp");
            }
            document.getElementById('progressbar').value = 1;

            // espaces insécables
            if (document.getElementById("o_group_nbsp").checked) {
                if (document.getElementById("o_nbsp_before_punctuation").checked) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_punctuation");
                    [sText, n2] = this.formatText(sText, "nbsp_repair");
                    document.getElementById('res_o_nbsp_before_punctuation').textContent = n1 - n2;
                }
                if (document.getElementById("o_nbsp_within_quotation_marks").checked) {
                    [sText, n1] = this.formatText(sText, "nbsp_within_numbers");
                    document.getElementById('res_o_nbsp_within_numbers').textContent = n1;
                }
                if (document.getElementById("o_nbsp_before_units").checked) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_units");
                    document.getElementById('res_o_nbsp_before_units').textContent = n1;
                }




                document.getElementById("o_group_nbsp").checked = false;
                this.switchGroup("o_group_nbsp");
            }
            document.getElementById('progressbar').value = 2;

            // espaces manquants
            if (document.getElementById("o_group_typo").checked) {







                    [sText, n1] = this.formatText(sText, "nbsp_within_numbers");
                    document.getElementById('res_o_nbsp_within_numbers').textContent = n1;
                }
                if (document.getElementById("o_nbsp_before_units").checked) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_units");
                    document.getElementById('res_o_nbsp_before_units').textContent = n1;
                }
                if (document.getElementById("o_nbsp_titles").checked) {
                    [sText, n1] = this.formatText(sText, "nbsp_titles");
                    document.getElementById('res_o_nbsp_titles').textContent = n1;
                }
                document.getElementById("o_group_nbsp").checked = false;
                this.switchGroup("o_group_nbsp");
            }
            document.getElementById('progressbar').value = 2;

            // espaces manquants
            if (document.getElementById("o_group_typo").checked) {

Modified gc_lang/fr/tb/content/overlay.xul from [4bf693a054] to [0101f0b174].

                <label id="res_o_nbsp_within_numbers" class="result" />
              </hbox>
              <hbox class="blockopt underline">
                <checkbox id="o_nbsp_before_units" class="option" data-default="true" label="&tf_nbsp_before_units;" />
                <spacer flex="1" />
                <label id="res_o_nbsp_before_units" class="result" />
              </hbox>





            </vbox>
          </groupbox>

          <!-- Deletions -->
          <groupbox>
            <caption><checkbox id="o_group_delete" class="option optiongroup" data-default="true" label="&tf_delete;" /></caption>
            <vbox id="group_delete" class="groupblock">







                <label id="res_o_nbsp_within_numbers" class="result" />
              </hbox>
              <hbox class="blockopt underline">
                <checkbox id="o_nbsp_before_units" class="option" data-default="true" label="&tf_nbsp_before_units;" />
                <spacer flex="1" />
                <label id="res_o_nbsp_before_units" class="result" />
              </hbox>
              <hbox class="blockopt underline">
                <checkbox id="o_nbsp_titles" class="option" data-default="true" label="&tf_nbsp_titles;" />
                <spacer flex="1" />
                <label id="res_o_nbsp_titles" class="result" />
              </hbox>
            </vbox>
          </groupbox>

          <!-- Deletions -->
          <groupbox>
            <caption><checkbox id="o_group_delete" class="option optiongroup" data-default="true" label="&tf_delete;" /></caption>
            <vbox id="group_delete" class="groupblock">

Modified gc_lang/fr/tb/locale/en/overlay.dtd from [c3723dc977] to [1e54ceb89b].

<!ENTITY tf_add_space_around_hyphens "Surrounding dashes">
<!ENTITY tf_nbsp "Non breaking spaces">
<!ENTITY tf_nbsp_before_punctuation "Before : ; ? and !">
<!ENTITY tf_nbsp_within_quotation_marks "With quoting marks « and »">
<!ENTITY tf_nbsp_before_symbol "Before &#x0025; ‰ € $ £ ¥ ˚C">
<!ENTITY tf_nbsp_within_numbers "Within numbers">
<!ENTITY tf_nbsp_before_units "Before units of measurement">

<!ENTITY tf_delete "Deletions">
<!ENTITY tf_erase_non_breaking_hyphens "Soft hyphens">
<!ENTITY tf_typo "Typographical signs">
<!ENTITY tf_ts_apostrophe "Apostrophe (’)">
<!ENTITY tf_ts_ellipsis "Ellipsis (…)">
<!ENTITY tf_ts_dash_middle "Dashes:">
<!ENTITY tf_ts_dash_start "Dashes at beginning of paragraph:">







<!ENTITY tf_add_space_around_hyphens "Surrounding dashes">
<!ENTITY tf_nbsp "Non breaking spaces">
<!ENTITY tf_nbsp_before_punctuation "Before : ; ? and !">
<!ENTITY tf_nbsp_within_quotation_marks "With quoting marks « and »">
<!ENTITY tf_nbsp_before_symbol "Before &#x0025; ‰ € $ £ ¥ ˚C">
<!ENTITY tf_nbsp_within_numbers "Within numbers">
<!ENTITY tf_nbsp_before_units "Before units of measurement">
<!ENTITY tf_nbsp_titles "After titles">
<!ENTITY tf_delete "Deletions">
<!ENTITY tf_erase_non_breaking_hyphens "Soft hyphens">
<!ENTITY tf_typo "Typographical signs">
<!ENTITY tf_ts_apostrophe "Apostrophe (’)">
<!ENTITY tf_ts_ellipsis "Ellipsis (…)">
<!ENTITY tf_ts_dash_middle "Dashes:">
<!ENTITY tf_ts_dash_start "Dashes at beginning of paragraph:">
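
The new tf_nbsp_titles entity above ("After titles") labels the text-formatter option wired up in overlay.js and overlay.xul earlier in this check-in; the French locale below translates it as "Après les titres de civilité". The replacement rule itself is not part of this diff, so the following is only a hedged Python illustration of the idea, inserting a no-break space between a civility title and the name that follows:

import re

def nbsp_after_titles(text):
    # Hypothetical title list; the real nbsp_titles table may differ.
    return re.subn(r"\b(M\.|Mme|Mlle|Dr|Me|Pr) ", "\\1\u00a0", text)

sText, nCount = nbsp_after_titles("M. Dupont a rencontré Mme Martin.")
print(nCount, sText)    # 2 replacements, ordinary spaces turned into U+00A0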

Modified gc_lang/fr/tb/locale/fr/overlay.dtd from [816cf299c9] to [93a8ef80c4].

<!ENTITY tf_add_space_around_hyphens "Autour des tirets d’incise">
<!ENTITY tf_nbsp "Espaces insécables">
<!ENTITY tf_nbsp_before_punctuation "Avant : ; ? et !">
<!ENTITY tf_nbsp_within_quotation_marks "Avec les guillemets « et »">
<!ENTITY tf_nbsp_before_symbol "Avant &#x0025; ‰ € $ £ ¥ ˚C">
<!ENTITY tf_nbsp_within_numbers "À l’intérieur des nombres">
<!ENTITY tf_nbsp_before_units "Avant les unités de mesure">

<!ENTITY tf_delete "Suppressions">
<!ENTITY tf_erase_non_breaking_hyphens "Tirets conditionnels">
<!ENTITY tf_typo "Signes typographiques">
<!ENTITY tf_ts_apostrophe "Apostrophe (’)">
<!ENTITY tf_ts_ellipsis "Points de suspension (…)">
<!ENTITY tf_ts_dash_middle "Tirets d’incise :">
<!ENTITY tf_ts_dash_start "Tirets en début de paragraphe :">







<!ENTITY tf_add_space_around_hyphens "Autour des tirets d’incise">
<!ENTITY tf_nbsp "Espaces insécables">
<!ENTITY tf_nbsp_before_punctuation "Avant : ; ? et !">
<!ENTITY tf_nbsp_within_quotation_marks "Avec les guillemets « et »">
<!ENTITY tf_nbsp_before_symbol "Avant &#x0025; ‰ € $ £ ¥ ˚C">
<!ENTITY tf_nbsp_within_numbers "À l’intérieur des nombres">
<!ENTITY tf_nbsp_before_units "Avant les unités de mesure">
<!ENTITY tf_nbsp_titles "Après les titres de civilité">
<!ENTITY tf_delete "Suppressions">
<!ENTITY tf_erase_non_breaking_hyphens "Tirets conditionnels">
<!ENTITY tf_typo "Signes typographiques">
<!ENTITY tf_ts_apostrophe "Apostrophe (’)">
<!ENTITY tf_ts_ellipsis "Points de suspension (…)">
<!ENTITY tf_ts_dash_middle "Tirets d’incise :">
<!ENTITY tf_ts_dash_start "Tirets en début de paragraphe :">

Modified gc_lang/fr/webext/content_scripts/menu.css from [8c6775f5ab] to [ccd6be088e].

    all: initial;
    position: absolute;
    box-sizing: border-box;
    display: none;
    margin: -8px 0 0 -8px;
    width: 16px;
    height: 16px;
    background-color: hsla(210, 80%, 40%, .2);
    border: 4px solid hsla(210, 80%, 30%, .5);
    border-top: 4px solid hsla(210, 100%, 70%, .9);
    border-bottom: 4px solid hsla(210, 100%, 70%, .9);
    border-radius: 50%;
    text-align: center;
    cursor: pointer;
    box-shadow: 0 0 0 0 hsla(210, 80%, 50%, .5);
    /*z-index: 2147483640; /* maximum is 2147483647: https://stackoverflow.com/questions/491052/minimum-and-maximum-value-of-z-index */
    animation: grammalecte-spin 2s ease 1;
}
div.grammalecte_menu_main_button:hover {
    background-color: hsla(180, 100%, 55%, .6);
    animation: grammalecte-spin .5s linear infinite;
}

@keyframes grammalecte-spin {
    0% {
        transform: rotate(0deg) scale(1);
    }







    all: initial;
    position: absolute;
    box-sizing: border-box;
    display: none;
    margin: -8px 0 0 -8px;
    width: 16px;
    height: 16px;
    background-color: hsla(210, 80%, 95%, .5);
    border: 3px solid hsla(210, 80%, 50%, .9);
    border-top: 3px solid hsla(210, 80%, 90%, .9);
    border-left: 3px solid hsla(210, 80%, 90%, .9);
    border-radius: 50%;
    text-align: center;
    cursor: pointer;
    box-shadow: 0 0 0 0 hsla(210, 80%, 50%, .7);
    /*z-index: 2147483640; /* maximum is 2147483647: https://stackoverflow.com/questions/491052/minimum-and-maximum-value-of-z-index */
    animation: grammalecte-spin 2s ease 1;
}
div.grammalecte_menu_main_button:hover {
    background-color: hsla(210, 80%, 50%, .05);
    animation: grammalecte-spin .5s linear infinite;
}

@keyframes grammalecte-spin {
    0% {
        transform: rotate(0deg) scale(1);
    }

Modified gc_lang/fr/webext/content_scripts/panel.css from [2b6e1d1881] to [b1d4b36570].

/*
    CSS
    Content panels for Grammalecte
*/

div.grammalecte_panel {
    all: initial;
    padding: 0;
    margin: 0;
    position: fixed;
    box-sizing: content-box;
    z-index: 2147483641; /* maximum is 2147483647: https://stackoverflow.com/questions/491052/minimum-and-maximum-value-of-z-index */
    border: 2px solid hsl(210, 10%, 50%);
    border-radius: 10px 10px 10px 10px;
    background-color: hsl(210, 0%, 100%);
    color: hsl(0, 0%, 0%);
    font-family: "Trebuchet MS", "Fira Sans", "Liberation Sans", sans-serif;
    box-shadow: 0 0 2px 1px hsla(210, 50%, 50%, .5);
    line-height: normal;
    text-shadow: none;
    text-decoration: none;
    text-align: left;
    hyphens: none;
}
div.grammalecte_panel img {
    display: inline-block;
    margin: 0;
    padding: 0;
}

div.grammalecte_panel_bar {
    position: sticky;
    width: 100%;
    background-color: hsl(210, 0%, 90%);
    border-radius: 10px 10px 0 0;
    border-bottom: 1px solid hsl(210, 10%, 80%);
    color: hsl(210, 10%, 4%);
    font-size: 20px;
}
div.grammalecte_panel_title {
    padding: 10px 20px;
}
div.grammalecte_panel_label {
    display: inline-block;












/*
    CSS
    Content panels for Grammalecte
*/

div.grammalecte_panel {
    all: initial;
    padding: 0;
    margin: 0;
    position: fixed;
    box-sizing: content-box;
    z-index: 2147483641; /* maximum is 2147483647: https://stackoverflow.com/questions/491052/minimum-and-maximum-value-of-z-index */
    border: 2px solid hsl(210, 50%, 50%);
    border-radius: 10px 10px 10px 10px;
    background-color: hsl(210, 0%, 100%);
    color: hsl(0, 0%, 0%);
    font-family: "Trebuchet MS", "Fira Sans", "Liberation Sans", sans-serif;
    box-shadow: 0 0 1px 6px hsla(210, 50%, 50%, .5);
    line-height: normal;
    text-shadow: none;
    text-decoration: none;
    text-align: left;
    hyphens: none;
}
div.grammalecte_panel img {
    display: inline-block;
    margin: 0;
    padding: 0;
}

div.grammalecte_panel_bar {
    position: sticky;
    width: 100%;
    background-color: hsl(210, 20%, 92%);
    border-radius: 10px 10px 0 0;
    border-bottom: 1px solid hsl(210, 20%, 86%);
    color: hsl(210, 30%, 40%);
    font-size: 20px;
}
div.grammalecte_panel_title {
    padding: 10px 20px;
}
div.grammalecte_panel_label {
    display: inline-block;
    font-size: 22px;
    font-weight: bold;
    color: hsl(150, 0%, 100%);
    text-align: center;
    cursor: pointer;
}
div.grammalecte_copy_button:hover {
    background-color: hsl(150, 100%, 40%);
}
div.grammalecte_move_button {
    display: inline-block;
    padding: 2px 5px;
    background-color: hsl(180, 80%, 50%);
    font-family: "Trebuchet MS", "Fira Sans", "Liberation Sans", sans-serif;
    font-size: 22px;
    font-weight: bold;
    color: hsl(180, 0%, 100%);
    text-align: center;
    cursor: pointer;
}
div.grammalecte_move_button:hover {
    background-color: hsl(180, 100%, 60%);
}
div.grammalecte_close_button {
    display: inline-block;
    padding: 2px 10px;
    border-radius: 0 8px 0 0;
    background-color: hsl(0, 80%, 50%);
    font-family: "Trebuchet MS", "Fira Sans", "Liberation Sans", sans-serif;







    font-size: 22px;
    font-weight: bold;
    color: hsl(150, 0%, 100%);
    text-align: center;
    cursor: pointer;
}
div.grammalecte_copy_button:hover {
    background-color: hsl(150, 90%, 35%);
}
div.grammalecte_move_button {
    display: inline-block;
    padding: 2px 5px;
    background-color: hsl(180, 50%, 60%);
    font-family: "Trebuchet MS", "Fira Sans", "Liberation Sans", sans-serif;
    font-size: 22px;
    font-weight: bold;
    color: hsl(180, 0%, 100%);
    text-align: center;
    cursor: pointer;
}
div.grammalecte_move_button:hover {
    background-color: hsl(180, 80%, 65%);
}
div.grammalecte_close_button {
    display: inline-block;
    padding: 2px 10px;
    border-radius: 0 8px 0 0;
    background-color: hsl(0, 80%, 50%);
    font-family: "Trebuchet MS", "Fira Sans", "Liberation Sans", sans-serif;

Modified gc_lang/fr/webext/content_scripts/panel_gc.css from [eee69d8f3d] to [397d8a48b5].

}
div.grammalecte_paragraph_button:hover {
    background-color: hsl(0, 0%, 40%);
    color: hsl(0, 0%, 100%);
}

div.grammalecte_paragraph_actions .grammalecte_green {

    background-color: hsl(120, 30%, 50%);
    color: hsl(0, 0%, 96%);
}
div.grammalecte_paragraph_actions .grammalecte_green:hover {
    background-color: hsl(120, 50%, 40%);
    color: hsl(0, 0%, 100%);
}







}
div.grammalecte_paragraph_button:hover {
    background-color: hsl(0, 0%, 40%);
    color: hsl(0, 0%, 100%);
}

div.grammalecte_paragraph_actions .grammalecte_green {
    width: 80px;
    background-color: hsl(120, 30%, 50%);
    color: hsl(0, 0%, 96%);
}
div.grammalecte_paragraph_actions .grammalecte_green:hover {
    background-color: hsl(120, 50%, 40%);
    color: hsl(0, 0%, 100%);
}

Modified gc_lang/fr/webext/content_scripts/panel_gc.js from [6bf4fd5886] to [499bdc1ad7].

        this.oNodeControl = new GrammalecteNodeControl();
    }

    start (xNode=null) {
        this.oTooltip.hide();
        this.clear();
        if (xNode) {
            if (xNode.tagName == "TEXTAREA") {
                this.oNodeControl.setNode(xNode);
            } else {
                this.oNodeControl.clear();
                this.addMessage("Cette zone de texte n’est pas un champ de formulaire “textarea” mais un node HTML éditable. Les modifications ne seront pas répercutées automatiquement. Une fois votre texte corrigé, vous pouvez utiliser le bouton ‹∑› pour copier le texte dans le presse-papiers.");
            }
        }
    }

    clear () {
        while (this.xParagraphList.firstChild) {
            this.xParagraphList.removeChild(this.xParagraphList.firstChild);







        this.oNodeControl = new GrammalecteNodeControl();
    }

    start (xNode=null) {
        this.oTooltip.hide();
        this.clear();
        if (xNode) {

            this.oNodeControl.setNode(xNode);

            if (xNode.tagName != "TEXTAREA") {
                this.addMessage("Note : cette zone de texte n’est pas un champ de formulaire “textarea” mais un node HTML éditable. Une telle zone de texte est susceptible de contenir des éléments non textuels qui seront effacés lors de la correction.");
            }
        }
    }

    clear () {
        while (this.xParagraphList.firstChild) {
            this.xParagraphList.removeChild(this.xParagraphList.firstChild);
        xNodeErr.className = (this.aIgnoredErrors.has(xNodeErr.dataset.ignored_key)) ? "grammalecte_error_ignored" : "grammalecte_error grammalecte_error_" + oErr['sType'];
        return xNodeErr;
    }

    blockParagraph (xParagraph) {
        xParagraph.contentEditable = "false";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).textContent = "Analyse…";


    }

    freeParagraph (xParagraph) {
        xParagraph.contentEditable = "true";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).textContent = "Réanalyser";


    }

    applySuggestion (sNodeSuggId) { // sugg
        try {
            let sErrorId = document.getElementById(sNodeSuggId).dataset.error_id;
            //let sParaNum = sErrorId.slice(0, sErrorId.indexOf("-"));
            let xNodeErr = document.getElementById("grammalecte_err" + sErrorId);







        xNodeErr.className = (this.aIgnoredErrors.has(xNodeErr.dataset.ignored_key)) ? "grammalecte_error_ignored" : "grammalecte_error grammalecte_error_" + oErr['sType'];
        return xNodeErr;
    }

    blockParagraph (xParagraph) {
        xParagraph.contentEditable = "false";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).textContent = "Analyse…";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).style.backgroundColor = "hsl(0, 50%, 50%)";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).style.boxShadow = "0 0 0 3px hsla(0, 100%, 50%, .2)";
    }

    freeParagraph (xParagraph) {
        xParagraph.contentEditable = "true";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).textContent = "Réanalyser";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).style.backgroundColor = "hsl(120, 30%, 50%)";
        document.getElementById("grammalecte_check"+xParagraph.dataset.para_num).style.boxShadow = "none";
    }

    applySuggestion (sNodeSuggId) { // sugg
        try {
            let sErrorId = document.getElementById(sNodeSuggId).dataset.error_id;
            //let sParaNum = sErrorId.slice(0, sErrorId.indexOf("-"));
            let xNodeErr = document.getElementById("grammalecte_err" + sErrorId);

class GrammalecteNodeControl {

    constructor () {
        this.xNode = null;
        this.dParagraph = new Map();
        this.bTextArea = null;
        this.bWriteEN = false;  // write editable node
    }

    setNode (xNode) {
        this.clear();
        this.xNode = xNode;
        this.bTextArea = (xNode.tagName == "TEXTAREA");
        this.xNode.disabled = true;








class GrammalecteNodeControl {

    constructor () {
        this.xNode = null;
        this.dParagraph = new Map();
        this.bTextArea = null;

    }

    setNode (xNode) {
        this.clear();
        this.xNode = xNode;
        this.bTextArea = (xNode.tagName == "TEXTAREA");
        this.xNode.disabled = true;
            this.dParagraph.set(i, sText.slice(iStart, iEnd));
            i++;
            iStart = iEnd+1;
        }
        this.dParagraph.set(i, sText.slice(iStart));
        //console.log("Paragraphs number: " + (i+1));
    }

    write () {
        if (this.xNode !== null && (this.bTextArea || this.bWriteEN)) {
            let sText = "";

            this.dParagraph.forEach(function (val, key) {
                sText += val + "\n";
            });
            sText = sText.slice(0,-1).normalize("NFC");
            if (this.bTextArea) {
                this.xNode.value = sText;
            } else {
                this.xNode.textContent = sText;

            }
        }
    }
}








            this.dParagraph.set(i, sText.slice(iStart, iEnd));
            i++;
            iStart = iEnd+1;
        }
        this.dParagraph.set(i, sText.slice(iStart));
        //console.log("Paragraphs number: " + (i+1));
    }

    eraseContent () {
        while (this.xNode.firstChild) {
            this.xNode.removeChild(this.xNode.firstChild);
        }
    }

    write () {
        if (this.xNode !== null) {
            let sText = "";
            if (this.bTextArea) {
                this.dParagraph.forEach(function (val, key) {
                    sText += val + "\n";
                });


                this.xNode.value = sText.slice(0,-1).normalize("NFC");
            } else {
                this.eraseContent();
                this.dParagraph.forEach((val, key) => {
                    this.xNode.appendChild(document.createTextNode(val.normalize("NFC")));
                    this.xNode.appendChild(document.createElement("br"));
                });
                /*
                this.dParagraph.forEach(function (val, key) {
                    sText += val + "<br/>";
                });
                this.xNode.innerHTML = sText.normalize("NFC");
                */
            }
        }
    }
}
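
The rewritten write() above rebuilds a TEXTAREA by joining the dParagraph map with "\n", and rebuilds an editable HTML node from text nodes separated by <br>. As a rough Python analogue of the dParagraph bookkeeping only (the DOM branch has no direct equivalent here), the split/patch/join cycle looks like this:

def split_paragraphs(sText):
    # paragraph index -> paragraph text, like the dParagraph Map above
    return {i: sPara for i, sPara in enumerate(sText.split("\n"))}

def join_paragraphs(dParagraph):
    return "\n".join(dParagraph[i] for i in sorted(dParagraph))

dPara = split_paragraphs("Première ligne\nDeuxième ligne")
dPara[1] = "Deuxième ligne corrigée"    # a corrected paragraph is written back by index
print(join_paragraphs(dPara))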

Modified gc_lang/fr/webext/content_scripts/panel_tf.js from [7070609846] to [d7b30c236e].

            xSpace.appendChild(this._createBlockOption("o_add_space_around_hyphens", true, "Autour des tirets d’incise"));
            let xNBSP = this._createFieldset("group_nbsp", true, "Espaces insécables");
            xNBSP.appendChild(this._createBlockOption("o_nbsp_before_punctuation", true, "Avant : ; ? et !"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_within_quotation_marks", true, "Avec les guillemets « et »"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_before_symbol", true, "Avant % ‰ € $ £ ¥ ˚C"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_within_numbers", true, "À l’intérieur des nombres"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_before_units", true, "Avant les unités de mesure"));

            let xDelete = this._createFieldset("group_delete", true, "Suppressions");
            xDelete.appendChild(this._createBlockOption("o_erase_non_breaking_hyphens", true, "Tirets conditionnels"));
            let xColumn2 = oGrammalecte.createNode("div", {className: "grammalecte_tf_column"});
            let xTypo = this._createFieldset("group_typo", true, "Signes typographiques");
            xTypo.appendChild(this._createBlockOption("o_ts_apostrophe", true, "Apostrophe (’)"));
            xTypo.appendChild(this._createBlockOption("o_ts_ellipsis", true, "Points de suspension (…)"));
            xTypo.appendChild(this._createBlockOption("o_ts_dash_middle", true, "Tirets d’incise :"));







            xSpace.appendChild(this._createBlockOption("o_add_space_around_hyphens", true, "Autour des tirets d’incise"));
            let xNBSP = this._createFieldset("group_nbsp", true, "Espaces insécables");
            xNBSP.appendChild(this._createBlockOption("o_nbsp_before_punctuation", true, "Avant : ; ? et !"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_within_quotation_marks", true, "Avec les guillemets « et »"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_before_symbol", true, "Avant % ‰ € $ £ ¥ ˚C"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_within_numbers", true, "À l’intérieur des nombres"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_before_units", true, "Avant les unités de mesure"));
            xNBSP.appendChild(this._createBlockOption("o_nbsp_titles", true, "Après les titres de civilité"));
            let xDelete = this._createFieldset("group_delete", true, "Suppressions");
            xDelete.appendChild(this._createBlockOption("o_erase_non_breaking_hyphens", true, "Tirets conditionnels"));
            let xColumn2 = oGrammalecte.createNode("div", {className: "grammalecte_tf_column"});
            let xTypo = this._createFieldset("group_typo", true, "Signes typographiques");
            xTypo.appendChild(this._createBlockOption("o_ts_apostrophe", true, "Apostrophe (’)"));
            xTypo.appendChild(this._createBlockOption("o_ts_ellipsis", true, "Points de suspension (…)"));
            xTypo.appendChild(this._createBlockOption("o_ts_dash_middle", true, "Tirets d’incise :"));
                    document.getElementById('res_o_within_quotation_marks').textContent = n1;
                }
                this.setOption("o_group_ssp", false);
                this.switchGroup("o_group_ssp");
            }
            document.getElementById('grammalecte_tf_progressbar').value = 2;

            // espaces typographiques
            if (this.isSelected("o_group_nbsp")) {
                if (this.isSelected("o_nbsp_before_punctuation")) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_punctuation");
                    [sText, n2] = this.formatText(sText, "nbsp_repair");
                    document.getElementById('res_o_nbsp_before_punctuation').textContent = n1 - n2;
                }
                if (this.isSelected("o_nbsp_within_quotation_marks")) {







                    document.getElementById('res_o_within_quotation_marks').textContent = n1;
                }
                this.setOption("o_group_ssp", false);
                this.switchGroup("o_group_ssp");
            }
            document.getElementById('grammalecte_tf_progressbar').value = 2;

            // espaces insécables
            if (this.isSelected("o_group_nbsp")) {
                if (this.isSelected("o_nbsp_before_punctuation")) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_punctuation");
                    [sText, n2] = this.formatText(sText, "nbsp_repair");
                    document.getElementById('res_o_nbsp_before_punctuation').textContent = n1 - n2;
                }
                if (this.isSelected("o_nbsp_within_quotation_marks")) {
                    [sText, n1] = this.formatText(sText, "nbsp_within_numbers");
                    document.getElementById('res_o_nbsp_within_numbers').textContent = n1;
                }
                if (this.isSelected("o_nbsp_before_units")) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_units");
                    document.getElementById('res_o_nbsp_before_units').textContent = n1;
                }




                this.setOption("o_group_nbsp", false);
                this.switchGroup("o_group_nbsp");
            }
            document.getElementById('grammalecte_tf_progressbar').value = 3;

            // espaces manquants
            if (this.isSelected("o_group_typo")) {







                    [sText, n1] = this.formatText(sText, "nbsp_within_numbers");
                    document.getElementById('res_o_nbsp_within_numbers').textContent = n1;
                }
                if (this.isSelected("o_nbsp_before_units")) {
                    [sText, n1] = this.formatText(sText, "nbsp_before_units");
                    document.getElementById('res_o_nbsp_before_units').textContent = n1;
                }
                if (this.isSelected("o_nbsp_titles")) {
                    [sText, n1] = this.formatText(sText, "nbsp_titles");
                    document.getElementById('res_o_nbsp_titles').textContent = n1;
                }
                this.setOption("o_group_nbsp", false);
                this.switchGroup("o_group_nbsp");
            }
            document.getElementById('grammalecte_tf_progressbar').value = 3;

            // espaces manquants
            if (this.isSelected("o_group_typo")) {

Modified gc_lang/fr/webext/manifest.json from [779b2988db] to [57d55716f4].

{
  "manifest_version": 2,
  "name": "Grammalecte [fr]",
  "short_name": "Grammalecte [fr]",
  "version": "0.6.3.2",

  "applications": {
    "gecko": {
      "id": "French-GC@grammalecte.net",
      "strict_min_version": "56.0"
    }
  },




{
  "manifest_version": 2,
  "name": "Grammalecte [fr]",
  "short_name": "Grammalecte [fr]",
  "version": "0.6.4.2",

  "applications": {
    "gecko": {
      "id": "French-GC@grammalecte.net",
      "strict_min_version": "56.0"
    }
  },

Modified gc_lang/fr/webext/panel/lex_editor.js from [6d5b05bd83] to [afffd7df96].



const oSearch = {

    oSpellChecker: null,

    load: function () {
        this.oSpellChecker = new SpellChecker("fr", browser.extension.getURL("")+"grammalecte/graphspell/_dictionaries", "fr.json");
    },

    loadOtherDictionaries: function () {
        //TODO
    },

    listen: function () {









const oSearch = {

    oSpellChecker: null,

    load: function () {
        this.oSpellChecker = new SpellChecker("fr", browser.extension.getURL("")+"grammalecte/graphspell/_dictionaries", "fr-allvars.json");
    },

    loadOtherDictionaries: function () {
        //TODO
    },

    listen: function () {

Modified gc_lang/fr/xpi/data/dictionaries/fr-FR-classic-reform/fr-FR-classic-reform.aff from [a14cde38dd] to [4245d700cf].

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “TOUTES VARIANTES” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “TOUTES VARIANTES” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/xpi/data/dictionaries/fr-FR-classic/fr-FR-classic.aff from [34001eab6b] to [7aba573e1d].

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “CLASSIQUE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “CLASSIQUE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/xpi/data/dictionaries/fr-FR-modern/fr-FR-modern.aff from [e720fb281a] to [bc39751b7a].

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “MODERNE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “MODERNE” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified gc_lang/fr/xpi/data/dictionaries/fr-FR-reform/fr-FR-reform.aff from [e353a9ecb8] to [5fff0168b9].

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “RÉFORME 1990” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 02-04-2018 à 16:28
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.






# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# AFFIXES DU DICTIONNAIRE ORTHOGRAPHIQUE FRANÇAIS “RÉFORME 1990” v6.3
# par Olivier R. -- licence MPL 2.0
# Généré le 05-05-2018 à 15:38
# Pour améliorer le dictionnaire, allez sur http://www.dicollecte.org/



SET UTF-8

WORDCHARS -’'1234567890.

Modified graphspell-js/char_player.js from [0baf69300e] to [c2f75d3e03].


    _xTransCharsForSimplification: new Map([
        ['à', 'a'],  ['é', 'e'],  ['î', 'i'],  ['ô', 'o'],  ['û', 'u'],  ['ÿ', 'i'],  ['y', 'i'],
        ['â', 'a'],  ['è', 'e'],  ['ï', 'i'],  ['ö', 'o'],  ['ù', 'u'],  ['ŷ', 'i'],
        ['ä', 'a'],  ['ê', 'e'],  ['í', 'i'],  ['ó', 'o'],  ['ü', 'u'],  ['ý', 'i'],
        ['á', 'a'],  ['ë', 'e'],  ['ì', 'i'],  ['ò', 'o'],  ['ú', 'u'],  ['ỳ', 'i'],
        ['ā', 'a'],  ['ē', 'e'],  ['ī', 'i'],  ['ō', 'o'],  ['ū', 'u'],  ['ȳ', 'i'],
        ['ñ', 'n'],  ['k', 'q'],  ['w', 'v'],
        ['œ', 'oe'], ['æ', 'ae'], 
        ['ſ', 's'],  ['ffi', 'ffi'],  ['ffl', 'ffl'],  ['ff', 'ff'],  ['ſt', 'ft'],  ['fi', 'fi'],  ['fl', 'fl'],  ['st', 'st']
    ]),

    simplifyWord: function (sWord) {
        // word simplification before calculating distance between words
        sWord = sWord.toLowerCase();








    _xTransCharsForSimplification: new Map([
        ['à', 'a'],  ['é', 'e'],  ['î', 'i'],  ['ô', 'o'],  ['û', 'u'],  ['ÿ', 'i'],  ['y', 'i'],
        ['â', 'a'],  ['è', 'e'],  ['ï', 'i'],  ['ö', 'o'],  ['ù', 'u'],  ['ŷ', 'i'],
        ['ä', 'a'],  ['ê', 'e'],  ['í', 'i'],  ['ó', 'o'],  ['ü', 'u'],  ['ý', 'i'],
        ['á', 'a'],  ['ë', 'e'],  ['ì', 'i'],  ['ò', 'o'],  ['ú', 'u'],  ['ỳ', 'i'],
        ['ā', 'a'],  ['ē', 'e'],  ['ī', 'i'],  ['ō', 'o'],  ['ū', 'u'],  ['ȳ', 'i'],
        ['ç', 'c'],  ['ñ', 'n'],  ['k', 'q'],  ['w', 'v'],
        ['œ', 'oe'], ['æ', 'ae'], 
        ['ſ', 's'],  ['ffi', 'ffi'],  ['ffl', 'ffl'],  ['ff', 'ff'],  ['ſt', 'ft'],  ['fi', 'fi'],  ['fl', 'fl'],  ['st', 'st']
    ]),

    simplifyWord: function (sWord) {
        // word simplification before calculating distance between words
        sWord = sWord.toLowerCase();
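
This hunk adds ['ç', 'c'] to the simplification table used before measuring the distance between a typed word and dictionary entries, so a missing cedilla no longer counts as a difference. A hedged Python sketch of that pre-processing step (the real implementation is the JavaScript simplifyWord above, whose table is larger):

_SIMPLIFY = str.maketrans({
    "à": "a", "â": "a", "ä": "a",
    "é": "e", "è": "e", "ê": "e", "ë": "e",
    "ç": "c",            # the mapping added in this check-in
    "k": "q", "w": "v",
})

def simplify_word(sWord):
    # lower-case, then flatten diacritics and equivalent letters
    return sWord.lower().translate(_SIMPLIFY)

print(simplify_word("Çà"))    # -> "ca"
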
        ["5", "sgSG"],
        ["6", "bdgBDG"],
        ["7", "ltLT"],
        ["8", "bB"],
        ["9", "gbdGBD"],
        ["0", "oôOÔ"],

        ["a", "aàâáäæ"],
        ["A", "AÀÂÁÄÆ"],
        ["à", "aàâáäæ"],
        ["À", "AÀÂÁÄÆ"],
        ["â", "aàâáäæ"],
        ["Â", "AÀÂÁÄÆ"],
        ["á", "aàâáäæ"],
        ["Á", "AÀÂÁÄÆ"],
        ["ä", "aàâáäæ"],
        ["Ä", "AÀÂÁÄÆ"],

        ["æ", "æéa"],
        ["Æ", "ÆÉA"],



        ["c", "cçskqśŝ"],

        ["C", "CÇSKQŚŜ"],
        ["ç", "cçskqśŝ"],
        ["Ç", "CÇSKQŚŜ"],



        ["e", "eéèêëœ"],

        ["E", "EÉÈÊËŒ"],
        ["é", "eéèêëœ"],
        ["É", "EÉÈÊËŒ"],
        ["ê", "eéèêëœ"],
        ["Ê", "EÉÈÊËŒ"],
        ["è", "eéèêëœ"],
        ["È", "EÉÈÊËŒ"],
        ["ë", "eéèêëœ"],

        ["Ë", "EÉÈÊËŒ"],



        ["g", "gj"],
        ["G", "GJ"],
        


        ["i", "iîïyíìÿ"],

        ["I", "IÎÏYÍÌŸ"],
        ["î", "iîïyíìÿ"],
        ["Î", "IÎÏYÍÌŸ"],
        ["ï", "iîïyíìÿ"],
        ["Ï", "IÎÏYÍÌŸ"],
        ["í", "iîïyíìÿ"],
        ["Í", "IÎÏYÍÌŸ"],
        ["ì", "iîïyíìÿ"],
        ["Ì", "IÎÏYÍÌŸ"],

        ["j", "jg"],
        ["J", "JG"],

        ["k", "kcq"],
        ["K", "KCQ"],







        ["n", "nñ"],
        ["N", "NÑ"],

        ["o", "oôóòöœ"],
        ["O", "OÔÓÒÖŒ"],
        ["ô", "oôóòöœ"],
        ["Ô", "OÔÓÒÖŒ"],
        ["ó", "oôóòöœ"],
        ["Ó", "OÔÓÒÖŒ"],
        ["ò", "oôóòöœ"],
        ["Ò", "OÔÓÒÖŒ"],
        ["ö", "oôóòöœ"],
        ["Ö", "OÔÓÒÖŒ"],

        ["œ", "œoôeéèêë"],

        ["Œ", "ŒOÔEÉÈÊË"],



        ["q", "qck"],
        ["Q", "QCK"],




        ["s", "sśŝcç"],
        ["S", "SŚŜCÇ"],
        ["ś", "sśŝcç"],
        ["Ś", "SŚŜCÇ"],
        ["ŝ", "sśŝcç"],
        ["Ŝ", "SŚŜCÇ"],




        ["u", "uûùüú"],
        ["U", "UÛÙÜÚ"],
        ["û", "uûùüú"],
        ["Û", "UÛÙÜÚ"],
        ["ù", "uûùüú"],
        ["Ù", "UÛÙÜÚ"],
        ["ü", "uûùüú"],
        ["Ü", "UÛÙÜÚ"],
        ["ú", "uûùüú"],
        ["Ú", "UÛÙÜÚ"],

        ["v", "vw"],
        ["V", "VW"],

        ["w", "wv"],
        ["W", "WV"],

        ["x", "xck"],
        ["X", "XCK"],

        ["y", "yÿŷýỳ"],
        ["Y", "YŸŶÝỲ"],
        ["ÿ", "yÿŷýỳ"],
        ["Ÿ", "YŸŶÝỲ"],
        ["ŷ", "yÿŷýỳ"],
        ["Ŷ", "YŸŶÝỲ"],
        ["ý", "yÿŷýỳ"],
        ["Ý", "YŸŶÝỲ"],
        ["ỳ", "yÿŷýỳ"],
        ["Ỳ", "YŸŶÝỲ"],

        ["z", "zs"],
        ["Z", "ZS"],
    ]),

    d1toX: new Map([
        ["æ", ["ae",]],
        ["Æ", ["AE",]],
        ["b", ["bb",]],
        ["B", ["BB",]],







(new version, lines 60-210)
        ["5", "sgSG"],
        ["6", "bdgBDG"],
        ["7", "ltLT"],
        ["8", "bB"],
        ["9", "gbdGBD"],
        ["0", "oôOÔ"],

        ["a", "aAàÀâÂáÁäÄāĀæÆ"],
        ["A", "AaÀàÂâÁáÄäĀāÆæ"],
        ["à", "aAàÀâÂáÁäÄāĀæÆ"],
        ["À", "AaÀàÂâÁáÄäĀāÆæ"],
        ["â", "aAàÀâÂáÁäÄāĀæÆ"],
        ["Â", "AaÀàÂâÁáÄäĀāÆæ"],
        ["á", "aAàÀâÂáÁäÄāĀæÆ"],
        ["Á", "AaÀàÂâÁáÄäĀāÆæ"],
        ["ä", "aAàÀâÂáÁäÄāĀæÆ"],
        ["Ä", "AaÀàÂâÁáÄäĀāÆæ"],

        ["æ", "æÆéÉaA"],
        ["Æ", "ÆæÉéAa"],

        ["b", "bB"],
        ["B", "Bb"],

        ["c", "cCçÇsSkKqQśŚŝŜ"],
        ["C", "CcÇçSsKkQqŚśŜŝ"],
        ["ç", "cCçÇsSkKqQśŚŝŜ"],
        ["Ç", "CcÇçSsKkQqŚśŜŝ"],

        ["d", "dDðÐ"],
        ["D", "DdÐð"],

        ["e", "eEéÉèÈêÊëËēĒœŒ"],
        ["E", "EeÉéÈèÊêËëĒēŒœ"],
        ["é", "eEéÉèÈêÊëËēĒœŒ"],
        ["É", "EeÉéÈèÊêËëĒēŒœ"],
        ["ê", "eEéÉèÈêÊëËēĒœŒ"],
        ["Ê", "EeÉéÈèÊêËëĒēŒœ"],
        ["è", "eEéÉèÈêÊëËēĒœŒ"],
        ["È", "EeÉéÈèÊêËëĒēŒœ"],
        ["ë", "eEéÉèÈêÊëËēĒœŒ"],
        ["Ë", "EeÉéÈèÊêËëĒēŒœ"],

        ["f", "fF"],
        ["F", "Ff"],

        ["g", "gGjJĵĴ"],
        ["G", "GgJjĴĵ"],
        
        ["h", "hH"],
        ["H", "Hh"],

        ["i", "iIîÎïÏyYíÍìÌīĪÿŸ"],
        ["I", "IiÎîÏïYyÍíÌìĪīŸÿ"],
        ["î", "iIîÎïÏyYíÍìÌīĪÿŸ"],
        ["Î", "IiÎîÏïYyÍíÌìĪīŸÿ"],
        ["ï", "iIîÎïÏyYíÍìÌīĪÿŸ"],
        ["Ï", "IiÎîÏïYyÍíÌìĪīŸÿ"],
        ["í", "iIîÎïÏyYíÍìÌīĪÿŸ"],
        ["Í", "IiÎîÏïYyÍíÌìĪīŸÿ"],
        ["ì", "iIîÎïÏyYíÍìÌīĪÿŸ"],
        ["Ì", "IiÎîÏïYyÍíÌìĪīŸÿ"],

        ["j", "jJgGĵĴ"],
        ["J", "JjGgĴĵ"],

        ["k", "kKcCqQ"],
        ["K", "KkCcQq"],

        ["l", "lLłŁ"],
        ["L", "LlŁł"],

        ["m", "mMḿḾ"],
        ["M", "MmḾḿ"],

        ["n", "nNñÑńŃǹǸ"],
        ["N", "NnÑñŃńǸǹ"],

        ["o", "oOôÔóÓòÒöÖōŌœŒ"],
        ["O", "OoÔôÓóÒòÖöŌōŒœ"],
        ["ô", "oOôÔóÓòÒöÖōŌœŒ"],
        ["Ô", "OoÔôÓóÒòÖöŌōŒœ"],
        ["ó", "oOôÔóÓòÒöÖōŌœŒ"],
        ["Ó", "OoÔôÓóÒòÖöŌōŒœ"],
        ["ò", "oOôÔóÓòÒöÖōŌœŒ"],
        ["Ò", "OoÔôÓóÒòÖöŌōŒœ"],
        ["ö", "oOôÔóÓòÒöÖōŌœŒ"],
        ["Ö", "OoÔôÓóÒòÖöŌōŒœ"],

        ["œ", "œŒoOôÔeEéÉèÈêÊëË"],
        ["Œ", "ŒœOoÔôEeÉéÈèÊêËë"],

        ["p", "pPṕṔ"],
        ["P", "PpṔṕ"],

        ["q", "qQcCkK"],
        ["Q", "QqCcKk"],

        ["r", "rRŕŔ"],
        ["R", "RrŔŕ"],

        ["s", "sScCçÇśŚŝŜ"],
        ["S", "SsCcÇ猜Ŝŝ"],
        ["ś", "sScCçÇśŚŝŜ"],
        ["Ś", "SsCcÇ猜Ŝŝ"],
        ["ŝ", "sScCçÇśŚŝŜ"],
        ["Ŝ", "SsCcÇ猜Ŝŝ"],

        ["t", "tT"],
        ["T", "Tt"],

        ["u", "uUûÛùÙüÜúÚūŪ"],
        ["U", "UuÛûÙùÜüÚúŪū"],
        ["û", "uUûÛùÙüÜúÚūŪ"],
        ["Û", "UuÛûÙùÜüÚúŪū"],
        ["ù", "uUûÛùÙüÜúÚūŪ"],
        ["Ù", "UuÛûÙùÜüÚúŪū"],
        ["ü", "uUûÛùÙüÜúÚūŪ"],
        ["Ü", "UuÛûÙùÜüÚúŪū"],
        ["ú", "uUûÛùÙüÜúÚūŪ"],
        ["Ú", "UuÛûÙùÜüÚúŪū"],

        ["v", "vVwW"],
        ["V", "VvWw"],

        ["w", "wWvV"],
        ["W", "WwVv"],

        ["x", "xXcCkK"],
        ["X", "XxCcKk"],

        ["y", "yYiIîÎÿŸŷŶýÝỲȳȲ"],
        ["Y", "YyIiÎîŸÿŶŷÝýỳȲȳ"],
        ["ÿ", "yYiIîÎÿŸŷŶýÝỲȳȲ"],
        ["Ÿ", "YyIiÎîŸÿŶŷÝýỳȲȳ"],
        ["ŷ", "yYiIîÎÿŸŷŶýÝỲȳȲ"],
        ["Ŷ", "YyIiÎîŸÿŶŷÝýỳȲȳ"],
        ["ý", "yYiIîÎÿŸŷŶýÝỲȳȲ"],
        ["Ý", "YyIiÎîŸÿŶŷÝýỳȲȳ"],
        ["ỳ", "yYiIîÎÿŸŷŶýÝỲȳȲ"],
        ["Ỳ", "YyIiÎîŸÿŶŷÝýỳȲȳ"],

        ["z", "zZsSẑẐźŹ"],
        ["Z", "ZzSsẐẑŹź"],
    ]),

    d1toX: new Map([
        ["æ", ["ae",]],
        ["Æ", ["AE",]],
        ["b", ["bb",]],
        ["B", ["BB",]],
(old version, lines 332-350)
    aPfx2: new Set([
        "belgo", "franco", "génito", "gynéco", "médico", "russo"
    ]),


    cut: function (sWord) {
        // returns an arry of strings (prefix, trimed_word, suffix)







        let m = /^([a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st]+)(-(?:t-|)(?:ils?|elles|on|je|tu|nous|vous)$)/.exec(sWord);
        if (m) {
            return ["", m[1], m[2]];

        }
        return ["", sWord, ""];
    },

    // Other functions
    filterSugg: function (aSugg) {
        return aSugg.filter((sSugg) => { return !sSugg.endsWith("è") && !sSugg.endsWith("È"); });
    }








(new version, lines 359-385)
    aPfx2: new Set([
        "belgo", "franco", "génito", "gynéco", "médico", "russo"
    ]),


    cut: function (sWord) {
        // returns an arry of strings (prefix, trimed_word, suffix)
        let sPrefix = "";
        let sSuffix = "";
        let m = /^([ldmtsnjcç]|lorsqu|presqu|jusqu|puisqu|quoiqu|quelqu|qu)[’'‘`]([a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st-]+)/i.exec(sWord);
        if (m) {
            sPrefix = m[1] + "’";
            sWord = m[2];
        }
        m = /^([a-zA-Zà-öÀ-Ö0-9_ø-ÿØ-ßĀ-ʯfi-st]+)(-(?:t-|)(?:ils?|elles?|on|je|tu|nous|vous|ce)$)/i.exec(sWord);
        if (m) {
            sWord = m[1];
            sSuffix = m[2];
        }
        return [sPrefix, sWord, sSuffix];
    },

    // Other functions
    filterSugg: function (aSugg) {
        return aSugg.filter((sSugg) => { return !sSugg.endsWith("è") && !sSugg.endsWith("È"); });
    }

Modified graphspell-js/ibdawg.js from [d566558346] to [241ce099fe].

(old version, lines 59-85)
        if (this.dSugg.get(0).length) {
            // we sort the better results with the original word
            let dDistTemp = new Map();
            lRes.forEach((sSugg) => { dDistTemp.set(sSugg, str_transform.distanceDamerauLevenshtein(this.sWord, sSugg)); });
            lRes = lRes.sort((sA, sB) => { return dDistTemp.get(sA) - dDistTemp.get(sB); });
            dDistTemp.clear();
        }
        for (let lSugg of this.dSugg.values()) {



            for (let sSugg of lSugg) { lRes.push(sSugg); }
            if (lRes.length > nSuggLimit) {
                break;
            }
        }
        lRes = char_player.filterSugg(lRes);
        if (this.sWord.gl_isTitle()) {
            lRes = lRes.map((sSugg) => { return sSugg.gl_toCapitalize(); });
        }
        else if (this.sWord.gl_isUpperCase()) {
            lRes = lRes.map((sSugg) => { return sSugg.toUpperCase(); });
        }
        return lRes.slice(0, nSuggLimit);
    }

    reset () {
        this.aSugg.clear();
        this.dSugg.clear();
    }







(new version, lines 59-88)
        if (this.dSugg.get(0).length) {
            // we sort the better results with the original word
            let dDistTemp = new Map();
            lRes.forEach((sSugg) => { dDistTemp.set(sSugg, str_transform.distanceDamerauLevenshtein(this.sWord, sSugg)); });
            lRes = lRes.sort((sA, sB) => { return dDistTemp.get(sA) - dDistTemp.get(sB); });
            dDistTemp.clear();
        }
        for (let [nDist, lSugg] of this.dSugg.entries()) {
            if (nDist > this.nDistLimit) {
                break;
            }
            lRes.push(...lSugg);
            if (lRes.length > nSuggLimit) {
                break;
            }
        }
        lRes = char_player.filterSugg(lRes);
        if (this.sWord.gl_isUpperCase()) {
            lRes = lRes.map((sSugg) => { return sSugg.toUpperCase(); });
        }
        else if (this.sWord.slice(0,1).gl_isUpperCase()) {
            lRes = lRes.map((sSugg) => { return sSugg.slice(0,1).toUpperCase() + sSugg.slice(1); });
        }
        return lRes.slice(0, nSuggLimit);
    }

    reset () {
        this.aSugg.clear();
        this.dSugg.clear();
    }
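
A side note on the hunk above: suggestion casing is now restored from the first character of the typed word (plus an all-caps test) instead of the old gl_isTitle() check; the Python reader in graphspell/ibdawg.py below gets the same change. The following is a minimal Python sketch of just that casing step; restoreCase and the sample words are illustrative and not part of the project.

def restoreCase (sWord, lSugg):
    "apply the typed word's casing to the suggestions: all-caps stays all-caps, a capitalized word keeps its initial"
    if sWord.isupper():
        return [sSugg.upper() for sSugg in lSugg]
    if sWord[0:1].isupper():
        return [sSugg[0:1].upper() + sSugg[1:] for sSugg in lSugg]
    return lSugg

print(restoreCase("Chein", ["chien", "chenil"]))   # ['Chien', 'Chenil']
print(restoreCase("CHEIN", ["chien"]))             # ['CHIEN']
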
(old version, lines 170-185)
                this._getArcs = this._getArcs3;
                this._writeNodes = this._writeNodes3;
                break;
            default:
                throw ValueError("# Error: unknown code: " + this.nCompressionMethod);
        }
        //console.log(this.getInfo());
        this.bOptNumSigle = true;
        this.bOptNumAtLast = false;
    }

    getInfo () {
        return  `  Language: ${this.sLangName}   Lang code: ${this.sLangCode}   Dictionary name: ${this.sDicName}\n` +
                `  Compression method: ${this.nCompressionMethod}   Date: ${this.sDate}   Stemming: ${this.cStemming}FX\n` +
                `  Arcs values:  ${this.nArcVal} = ${this.nChar} characters,  ${this.nAff} affixes,  ${this.nTag} tags\n` +
                `  Dictionary: ${this.nEntry} entries,    ${this.nNode} nodes,   ${this.nArc} arcs\n` +







(new version, lines 173-188)
                this._getArcs = this._getArcs3;
                this._writeNodes = this._writeNodes3;
                break;
            default:
                throw ValueError("# Error: unknown code: " + this.nCompressionMethod);
        }
        //console.log(this.getInfo());
        this.bAcronymValid = true;
        this.bNumAtLastValid = false;
    }

    getInfo () {
        return  `  Language: ${this.sLangName}   Lang code: ${this.sLangCode}   Dictionary name: ${this.sDicName}\n` +
                `  Compression method: ${this.nCompressionMethod}   Date: ${this.sDate}   Stemming: ${this.cStemming}FX\n` +
                `  Arcs values:  ${this.nArcVal} = ${this.nChar} characters,  ${this.nAff} affixes,  ${this.nTag} tags\n` +
                `  Dictionary: ${this.nEntry} entries,    ${this.nNode} nodes,   ${this.nArc} arcs\n` +
(old version, lines 221-266)
        }
        if (sToken.includes("-")) {
            if (sToken.gl_count("-") > 4) {
                return true;
            }
            return sToken.split("-").every(sWord  =>  this.isValid(sWord)); 
        }



        return false;
    }

    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (!sWord) {
            return null;
        }
        if (sWord.includes("’")) { // ugly hack
            sWord = sWord.replace("’", "'");
        }
        if (this.lookup(sWord)) {
            return true;
        }
        if (sWord.gl_isDigit()) {
            return true;
        }
        if (sWord.charAt(0).gl_isUpperCase()) {
            if (sWord.length > 1) {
                if (sWord.gl_isTitle()) {
                    return !!this.lookup(sWord.toLowerCase());
                }
                if (sWord.gl_isUpperCase()) {
                    if (this.bOptNumSigle) {
                        return true;
                    }
                    return !!(this.lookup(sWord.toLowerCase()) || this.lookup(sWord.gl_toCapitalize()));
                }
                return !!this.lookup(sWord.slice(0, 1).toLowerCase() + sWord.slice(1));
            } else {
                return !!this.lookup(sWord.toLowerCase());
            }



        }
        return false;
    }

    _convBytesToInteger (aBytes) {
        // Byte order = Big Endian (bigger first)
        let nVal = 0;







(new version, lines 224-272)
        }
        if (sToken.includes("-")) {
            if (sToken.gl_count("-") > 4) {
                return true;
            }
            return sToken.split("-").every(sWord  =>  this.isValid(sWord)); 
        }
        if (sToken.includes(".") || sToken.includes("·")) {
            return true;
        }
        return false;
    }

    isValid (sWord) {
        // checks if sWord is valid (different casing tested if the first letter is a capital)
        if (!sWord) {
            return null;
        }
        if (sWord.includes("’")) { // ugly hack
            sWord = sWord.replace("’", "'");
        }
        if (this.lookup(sWord)) {
            return true;
        }



        if (sWord.charAt(0).gl_isUpperCase()) {
            if (sWord.length > 1) {
                if (sWord.gl_isTitle()) {
                    return !!this.lookup(sWord.toLowerCase());
                }
                if (sWord.gl_isUpperCase()) {
                    if (this.bAcronymValid) {
                        return true;
                    }
                    return !!(this.lookup(sWord.toLowerCase()) || this.lookup(sWord.gl_toCapitalize()));
                }
                return !!this.lookup(sWord.slice(0, 1).toLowerCase() + sWord.slice(1));
            } else {
                return !!this.lookup(sWord.toLowerCase());
            }
        }
        if (sWord.slice(0,1).gl_isDigit()) {
            return true;
        }
        return false;
    }

    _convBytesToInteger (aBytes) {
        // Byte order = Big Endian (bigger first)
        let nVal = 0;
(old version, lines 305-390)
        sWord = char_player.spellingNormalization(sWord)
        let sPfx = "";
        let sSfx = "";
        [sPfx, sWord, sSfx] = char_player.cut(sWord);
        let nMaxSwitch = Math.max(Math.floor(sWord.length / 3), 1);
        let nMaxDel = Math.floor(sWord.length / 5);
        let nMaxHardRepl = Math.max(Math.floor((sWord.length - 5) / 4), 1);

        let oSuggResult = new SuggResult(sWord);
        this._suggest(oSuggResult, sWord, nMaxSwitch, nMaxDel, nMaxHardRepl);
        if (sWord.gl_isTitle()) {
            this._suggest(oSuggResult, sWord.toLowerCase(), nMaxSwitch, nMaxDel, nMaxHardRepl);
        }
        else if (sWord.gl_isLowerCase()) {
            this._suggest(oSuggResult, sWord.gl_toCapitalize(), nMaxSwitch, nMaxDel, nMaxHardRepl);
        }
        let aSugg = oSuggResult.getSuggestions(nSuggLimit);
        if (sSfx || sPfx) {
            // we add what we removed
            return aSugg.map( (sSugg) => { return sPfx + sSugg + sSfx } );
        }
        return aSugg;
    }

    _suggest (oSuggResult, sRemain, nMaxSwitch=0, nMaxDel=0, nMaxHardRepl=0, nDeep=0, iAddr=0, sNewWord="", bAvoidLoop=false) {
        // returns a set of suggestions
        // recursive function
        if (sRemain == "") {
            if (this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask) {
                oSuggResult.addSugg(sNewWord);
            }
            for (let sTail of this._getTails(iAddr)) {
                oSuggResult.addSugg(sNewWord+sTail);
            }
            return;
        }




        let cCurrent = sRemain.slice(0, 1);
        for (let [cChar, jAddr] of this._getCharArcs(iAddr)) {
            if (char_player.d1to1.gl_get(cCurrent, cCurrent).indexOf(cChar) != -1) {
                this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, jAddr, sNewWord+cChar);
            }
            else if (!bAvoidLoop && nMaxHardRepl) {

                this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl-1, nDeep+1, jAddr, sNewWord+cChar, true);




            }
        }
        if (!bAvoidLoop) { // avoid infinite loop
            if (sRemain.length > 1) {
                if (cCurrent == sRemain.slice(1, 2)) {
                    // same char, we remove 1 char without adding 1 to <sNewWord>
                    this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord);
                }
                else {
                    // switching chars
                    if (nMaxSwitch > 0) {
                        this._suggest(oSuggResult, sRemain.slice(1, 2)+sRemain.slice(0, 1)+sRemain.slice(2), nMaxSwitch-1, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true);
                    }
                    // delete char
                    if (nMaxDel > 0) {
                        this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel-1, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true);
                    }
                }
                // Phonetic replacements
                for (let sRepl of char_player.get1toXReplacement(sNewWord.slice(-1), cCurrent, sRemain.slice(1,2))) {
                    this._suggest(oSuggResult, sRepl + sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true);
                }
                for (let sRepl of char_player.d2toX.gl_get(sRemain.slice(0, 2), [])) {
                    this._suggest(oSuggResult, sRepl + sRemain.slice(2), nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true);
                }
            }
            // end of word
            if (sRemain.length == 2) {
                for (let sRepl of char_player.dFinal2.gl_get(sRemain, [])) {
                    this._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true);
                }
            }
            else if (sRemain.length == 1) {
                this._suggest(oSuggResult, "", nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true); // remove last char and go on
                for (let sRepl of char_player.dFinal1.gl_get(sRemain, [])) {
                    this._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, true);
                }
            }
        }
    }

    * _getCharArcs (iAddr) {
        // generator: yield all chars and addresses from node at address <iAddr>







(new version, lines 311-399)
        sWord = char_player.spellingNormalization(sWord)
        let sPfx = "";
        let sSfx = "";
        [sPfx, sWord, sSfx] = char_player.cut(sWord);
        let nMaxSwitch = Math.max(Math.floor(sWord.length / 3), 1);
        let nMaxDel = Math.floor(sWord.length / 5);
        let nMaxHardRepl = Math.max(Math.floor((sWord.length - 5) / 4), 1);
        let nMaxJump = Math.max(Math.floor(sWord.length / 4), 1);
        let oSuggResult = new SuggResult(sWord);
        this._suggest(oSuggResult, sWord, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump);






        let aSugg = oSuggResult.getSuggestions(nSuggLimit);
        if (sSfx || sPfx) {
            // we add what we removed
            return aSugg.map( (sSugg) => { return sPfx + sSugg + sSfx } );
        }
        return aSugg;
    }

    _suggest (oSuggResult, sRemain, nMaxSwitch=0, nMaxDel=0, nMaxHardRepl=0, nMaxJump=0, nDist=0, nDeep=0, iAddr=0, sNewWord="", bAvoidLoop=false) {
        // returns a set of suggestions
        // recursive function
        if (sRemain == "") {
            if (this._convBytesToInteger(this.byDic.slice(iAddr, iAddr+this.nBytesArc)) & this._finalNodeMask) {
                oSuggResult.addSugg(sNewWord);
            }
            for (let sTail of this._getTails(iAddr)) {
                oSuggResult.addSugg(sNewWord+sTail);
            }
            return;
        }
        if (nDist > oSuggResult.nDistLimit) {
            return;
        }

        let cCurrent = sRemain.slice(0, 1);
        for (let [cChar, jAddr] of this._getCharArcs(iAddr)) {
            if (char_player.d1to1.gl_get(cCurrent, cCurrent).indexOf(cChar) != -1) {
                this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, jAddr, sNewWord+cChar);
            }
            else if (!bAvoidLoop) {
                if (nMaxHardRepl) {
                    this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl-1, nMaxJump, nDist+1, nDeep+1, jAddr, sNewWord+cChar, true);
                }
                if (nMaxJump) {
                    this._suggest(oSuggResult, sRemain, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump-1, nDist+1, nDeep+1, jAddr, sNewWord+cChar, true);
                }
            }
        }
        if (!bAvoidLoop) { // avoid infinite loop
            if (sRemain.length > 1) {
                if (cCurrent == sRemain.slice(1, 2)) {
                    // same char, we remove 1 char without adding 1 to <sNewWord>
                    this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord);
                }
                else {
                    // switching chars
                    if (nMaxSwitch > 0) {
                        this._suggest(oSuggResult, sRemain.slice(1, 2)+sRemain.slice(0, 1)+sRemain.slice(2), nMaxSwitch-1, nMaxDel, nMaxHardRepl, nMaxJump, nDist+1, nDeep+1, iAddr, sNewWord, true);
                    }
                    // delete char
                    if (nMaxDel > 0) {
                        this._suggest(oSuggResult, sRemain.slice(1), nMaxSwitch, nMaxDel-1, nMaxHardRepl, nMaxJump, nDist+1, nDeep+1, iAddr, sNewWord, true);
                    }
                }
                // Phonetic replacements
                for (let sRepl of char_player.get1toXReplacement(sNewWord.slice(-1), cCurrent, sRemain.slice(1,2))) {
                    this._suggest(oSuggResult, sRepl + sRemain.slice(1), nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, true);
                }
                for (let sRepl of char_player.d2toX.gl_get(sRemain.slice(0, 2), [])) {
                    this._suggest(oSuggResult, sRepl + sRemain.slice(2), nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, true);
                }
            }
            // end of word
            if (sRemain.length == 2) {
                for (let sRepl of char_player.dFinal2.gl_get(sRemain, [])) {
                    this._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, true);
                }
            }
            else if (sRemain.length == 1) {
                this._suggest(oSuggResult, "", nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, true); // remove last char and go on
                for (let sRepl of char_player.dFinal1.gl_get(sRemain, [])) {
                    this._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, true);
                }
            }
        }
    }

    * _getCharArcs (iAddr) {
        // generator: yield all chars and addresses from node at address <iAddr>

Modified graphspell-js/spellchecker.js from [327e00c673] to [3df103d578].

(old version, lines 18-32)
}


${map}


const dDefaultDictionaries = new Map([
    ["fr", "fr.json"],
    ["en", "en.json"]
]);


class SpellChecker {

    constructor (sLangCode, sPath="", mainDic="", extentedDic="", communityDic="", personalDic="") {







(new version, lines 18-32)
}


${map}


const dDefaultDictionaries = new Map([
    ["fr", "fr-allvars.json"],
    ["en", "en.json"]
]);


class SpellChecker {

    constructor (sLangCode, sPath="", mainDic="", extentedDic="", communityDic="", personalDic="") {

Modified graphspell/char_player.py from [130907f556] to [73eee3ee03].

(old version, lines 15-29)

_xTransCharsForSimplification = str.maketrans({
    'à': 'a',  'é': 'e',  'î': 'i',  'ô': 'o',  'û': 'u',  'ÿ': 'i',  "y": "i",
    'â': 'a',  'è': 'e',  'ï': 'i',  'ö': 'o',  'ù': 'u',  'ŷ': 'i',
    'ä': 'a',  'ê': 'e',  'í': 'i',  'ó': 'o',  'ü': 'u',  'ý': 'i',
    'á': 'a',  'ë': 'e',  'ì': 'i',  'ò': 'o',  'ú': 'u',  'ỳ': 'i',
    'ā': 'a',  'ē': 'e',  'ī': 'i',  'ō': 'o',  'ū': 'u',  'ȳ': 'i',
    'ñ': 'n',  'k': 'q',  'w': 'v',
    'œ': 'oe',  'æ': 'ae',
    'ſ': 's',  'ffi': 'ffi',  'ffl': 'ffl',  'ff': 'ff',  'ſt': 'ft',  'fi': 'fi',  'fl': 'fl',  'st': 'st', 
})

def simplifyWord (sWord):
    "word simplication before calculating distance between words"
    sWord = sWord.lower().translate(_xTransCharsForSimplification)







(new version, lines 15-29)

_xTransCharsForSimplification = str.maketrans({
    'à': 'a',  'é': 'e',  'î': 'i',  'ô': 'o',  'û': 'u',  'ÿ': 'i',  "y": "i",
    'â': 'a',  'è': 'e',  'ï': 'i',  'ö': 'o',  'ù': 'u',  'ŷ': 'i',
    'ä': 'a',  'ê': 'e',  'í': 'i',  'ó': 'o',  'ü': 'u',  'ý': 'i',
    'á': 'a',  'ë': 'e',  'ì': 'i',  'ò': 'o',  'ú': 'u',  'ỳ': 'i',
    'ā': 'a',  'ē': 'e',  'ī': 'i',  'ō': 'o',  'ū': 'u',  'ȳ': 'i',
    'ç': 'c',  'ñ': 'n',  'k': 'q',  'w': 'v',
    'œ': 'oe',  'æ': 'ae',
    'ſ': 's',  'ffi': 'ffi',  'ffl': 'ffl',  'ff': 'ff',  'ſt': 'ft',  'fi': 'fi',  'fl': 'fl',  'st': 'st', 
})

def simplifyWord (sWord):
    "word simplication before calculating distance between words"
    sWord = sWord.lower().translate(_xTransCharsForSimplification)
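
To make the change above concrete ('ç' is now folded to 'c' before word distances are computed), here is a small self-contained sketch of the same translation step. The table below is only an excerpt of the real _xTransCharsForSimplification mapping, and demoSimplify is a hypothetical helper, not project code.

# Fold accents and ligatures before computing the distance between words.
# Only a subset of the real mapping is reproduced; 'ç' -> 'c' is what this check-in adds.
_xDemoTrans = str.maketrans({
    'à': 'a',  'â': 'a',  'é': 'e',  'è': 'e',  'ê': 'e',  'ë': 'e',
    'î': 'i',  'ï': 'i',  'ô': 'o',  'û': 'u',  'ù': 'u',
    'ç': 'c',  'ñ': 'n',  'k': 'q',  'w': 'v',
    'œ': 'oe', 'æ': 'ae',
})

def demoSimplify (sWord):
    "hypothetical helper mirroring the first step of simplifyWord()"
    return sWord.lower().translate(_xDemoTrans)

print(demoSimplify("Façon"))   # facon
print(demoSimplify("cœur"))    # coeur
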
(old version, lines 49-172)
    "5": "sgSG",
    "6": "bdgBDG",
    "7": "ltLT",
    "8": "bB",
    "9": "gbdGBD",
    "0": "oôOÔ",

    "a": "aàâáäæ",
    "A": "AÀÂÁÄÆ",
    "à": "aàâáäæ",
    "À": "AÀÂÁÄÆ",
    "â": "aàâáäæ",
    "Â": "AÀÂÁÄÆ",
    "á": "aàâáäæ",
    "Á": "AÀÂÁÄÆ",
    "ä": "aàâáäæ",
    "Ä": "AÀÂÁÄÆ",

    "æ": "æéa",
    "Æ": "ÆÉA",



    "c": "cçskqśŝ",

    "C": "CÇSKQŚŜ",
    "ç": "cçskqśŝ",
    "Ç": "CÇSKQŚŜ",



    "e": "eéèêëœ",

    "E": "EÉÈÊËŒ",
    "é": "eéèêëœ",
    "É": "EÉÈÊËŒ",
    "ê": "eéèêëœ",
    "Ê": "EÉÈÊËŒ",
    "è": "eéèêëœ",
    "È": "EÉÈÊËŒ",
    "ë": "eéèêëœ",
    "Ë": "EÉÈÊËŒ",





    "g": "gj",
    "G": "GJ",
    
    "i": "iîïyíìÿ",

    "I": "IÎÏYÍÌŸ",


    "î": "iîïyíìÿ",
    "Î": "IÎÏYÍÌŸ",
    "ï": "iîïyíìÿ",
    "Ï": "IÎÏYÍÌŸ",
    "í": "iîïyíìÿ",
    "Í": "IÎÏYÍÌŸ",
    "ì": "iîïyíìÿ",
    "Ì": "IÎÏYÍÌŸ",

    "j": "jg",
    "J": "JG",

    "k": "kcq",
    "K": "KCQ",







    "n": "nñ",
    "N": "NÑ",

    "o": "oôóòöœ",
    "O": "OÔÓÒÖŒ",
    "ô": "oôóòöœ",
    "Ô": "OÔÓÒÖŒ",
    "ó": "oôóòöœ",
    "Ó": "OÔÓÒÖŒ",
    "ò": "oôóòöœ",
    "Ò": "OÔÓÒÖŒ",
    "ö": "oôóòöœ",
    "Ö": "OÔÓÒÖŒ",

    "œ": "œoôeéèêë",

    "Œ": "ŒOÔEÉÈÊË",



    "q": "qck",
    "Q": "QCK",




    "s": "sśŝcç",
    "S": "SŚŜCÇ",
    "ś": "sśŝcç",
    "Ś": "SŚŜCÇ",
    "ŝ": "sśŝcç",
    "Ŝ": "SŚŜCÇ",

    "u": "uûùüú",
    "U": "UÛÙÜÚ",
    "û": "uûùüú",

    "Û": "UÛÙÜÚ",




    "ù": "uûùüú",
    "Ù": "UÛÙÜÚ",
    "ü": "uûùüú",
    "Ü": "UÛÙÜÚ",
    "ú": "uûùüú",
    "Ú": "UÛÙÜÚ",

    "v": "vw",
    "V": "VW",

    "w": "wv",
    "W": "WV",

    "x": "xck",
    "X": "XCK",

    "y": "yÿŷýỳ",
    "Y": "YŸŶÝỲ",
    "ÿ": "yÿŷýỳ",
    "Ÿ": "YŸŶÝỲ",
    "ŷ": "yÿŷýỳ",
    "Ŷ": "YŸŶÝỲ",
    "ý": "yÿŷýỳ",
    "Ý": "YŸŶÝỲ",
    "ỳ": "yÿŷýỳ",
    "Ỳ": "YŸŶÝỲ",

    "z": "zs",
    "Z": "ZS",
}

d1toX = {
    "æ": ("ae",),
    "Æ": ("AE",),
    "b": ("bb",),
    "B": ("BB",),







(new version, lines 49-199)
    "5": "sgSG",
    "6": "bdgBDG",
    "7": "ltLT",
    "8": "bB",
    "9": "gbdGBD",
    "0": "oôOÔ",

    "a": "aAàÀâÂáÁäÄāĀæÆ",
    "A": "AaÀàÂâÁáÄäĀāÆæ",
    "à": "aAàÀâÂáÁäÄāĀæÆ",
    "À": "AaÀàÂâÁáÄäĀāÆæ",
    "â": "aAàÀâÂáÁäÄāĀæÆ",
    "Â": "AaÀàÂâÁáÄäĀāÆæ",
    "á": "aAàÀâÂáÁäÄāĀæÆ",
    "Á": "AaÀàÂâÁáÄäĀāÆæ",
    "ä": "aAàÀâÂáÁäÄāĀæÆ",
    "Ä": "AaÀàÂâÁáÄäĀāÆæ",

    "æ": "æÆéÉaA",
    "Æ": "ÆæÉéAa",

    "b": "bB",
    "B": "Bb",

    "c": "cCçÇsSkKqQśŚŝŜ",
    "C": "CcÇçSsKkQqŚśŜŝ",
    "ç": "cCçÇsSkKqQśŚŝŜ",
    "Ç": "CcÇçSsKkQqŚśŜŝ",

    "d": "dDðÐ",
    "D": "DdÐð",

    "e": "eEéÉèÈêÊëËēĒœŒ",
    "E": "EeÉéÈèÊêËëĒēŒœ",
    "é": "eEéÉèÈêÊëËēĒœŒ",
    "É": "EeÉéÈèÊêËëĒēŒœ",
    "ê": "eEéÉèÈêÊëËēĒœŒ",
    "Ê": "EeÉéÈèÊêËëĒēŒœ",
    "è": "eEéÉèÈêÊëËēĒœŒ",
    "È": "EeÉéÈèÊêËëĒēŒœ",
    "ë": "eEéÉèÈêÊëËēĒœŒ",

    "Ë": "EeÉéÈèÊêËëĒēŒœ",

    "f": "fF",
    "F": "Ff",

    "g": "gGjJĵĴ",
    "G": "GgJjĴĵ",
    
    "h": "hH",
    "H": "Hh",

    "i": "iIîÎïÏyYíÍìÌīĪÿŸ",
    "I": "IiÎîÏïYyÍíÌìĪīŸÿ",
    "î": "iIîÎïÏyYíÍìÌīĪÿŸ",
    "Î": "IiÎîÏïYyÍíÌìĪīŸÿ",
    "ï": "iIîÎïÏyYíÍìÌīĪÿŸ",
    "Ï": "IiÎîÏïYyÍíÌìĪīŸÿ",
    "í": "iIîÎïÏyYíÍìÌīĪÿŸ",
    "Í": "IiÎîÏïYyÍíÌìĪīŸÿ",
    "ì": "iIîÎïÏyYíÍìÌīĪÿŸ",
    "Ì": "IiÎîÏïYyÍíÌìĪīŸÿ",

    "j": "jJgGĵĴ",
    "J": "JjGgĴĵ",

    "k": "kKcCqQ",
    "K": "KkCcQq",

    "l": "lLłŁ",
    "L": "LlŁł",

    "m": "mMḿḾ",
    "M": "MmḾḿ",

    "n": "nNñÑńŃǹǸ",
    "N": "NnÑñŃńǸǹ",

    "o": "oOôÔóÓòÒöÖōŌœŒ",
    "O": "OoÔôÓóÒòÖöŌōŒœ",
    "ô": "oOôÔóÓòÒöÖōŌœŒ",
    "Ô": "OoÔôÓóÒòÖöŌōŒœ",
    "ó": "oOôÔóÓòÒöÖōŌœŒ",
    "Ó": "OoÔôÓóÒòÖöŌōŒœ",
    "ò": "oOôÔóÓòÒöÖōŌœŒ",
    "Ò": "OoÔôÓóÒòÖöŌōŒœ",
    "ö": "oOôÔóÓòÒöÖōŌœŒ",
    "Ö": "OoÔôÓóÒòÖöŌōŒœ",

    "œ": "œŒoOôÔeEéÉèÈêÊëË",
    "Œ": "ŒœOoÔôEeÉéÈèÊêËë",

    "p": "pPṕṔ",
    "P": "PpṔṕ",

    "q": "qQcCkK",
    "Q": "QqCcKk",

    "r": "rRŕŔ",
    "R": "RrŔŕ",

    "s": "sScCçÇśŚŝŜ",
    "S": "SsCcÇ猜Ŝŝ",
    "ś": "sScCçÇśŚŝŜ",
    "Ś": "SsCcÇ猜Ŝŝ",
    "ŝ": "sScCçÇśŚŝŜ",
    "Ŝ": "SsCcÇ猜Ŝŝ",

    "t": "tT",


    "T": "Tt",

    "u": "uUûÛùÙüÜúÚūŪ",
    "U": "UuÛûÙùÜüÚúŪū",
    "û": "uUûÛùÙüÜúÚūŪ",
    "Û": "UuÛûÙùÜüÚúŪū",
    "ù": "uUûÛùÙüÜúÚūŪ",
    "Ù": "UuÛûÙùÜüÚúŪū",
    "ü": "uUûÛùÙüÜúÚūŪ",
    "Ü": "UuÛûÙùÜüÚúŪū",
    "ú": "uUûÛùÙüÜúÚūŪ",
    "Ú": "UuÛûÙùÜüÚúŪū",

    "v": "vVwW",
    "V": "VvWw",

    "w": "wWvV",
    "W": "WwVv",

    "x": "xXcCkK",
    "X": "XxCcKk",

    "y": "yYiIîÎÿŸŷŶýÝỲȳȲ",
    "Y": "YyIiÎîŸÿŶŷÝýỳȲȳ",
    "ÿ": "yYiIîÎÿŸŷŶýÝỲȳȲ",
    "Ÿ": "YyIiÎîŸÿŶŷÝýỳȲȳ",
    "ŷ": "yYiIîÎÿŸŷŶýÝỲȳȲ",
    "Ŷ": "YyIiÎîŸÿŶŷÝýỳȲȳ",
    "ý": "yYiIîÎÿŸŷŶýÝỲȳȲ",
    "Ý": "YyIiÎîŸÿŶŷÝýỳȲȳ",
    "ỳ": "yYiIîÎÿŸŷŶýÝỲȳȲ",
    "Ỳ": "YyIiÎîŸÿŶŷÝýỳȲȳ",

    "z": "zZsSẑẐźŹ",
    "Z": "ZzSsẐẑŹź",
}

d1toX = {
    "æ": ("ae",),
    "Æ": ("AE",),
    "b": ("bb",),
    "B": ("BB",),
(old version, lines 321-342)
    "pseudo", "pré", "re", "ré", "sans", "sous", "supra", "sur", "ultra"
])
aPfx2 = frozenset([
    "belgo", "franco", "génito", "gynéco", "médico", "russo"
])



_zMotAvecPronom = re.compile("^(?i)(\\w+)(-(?:t-|)(?:ils?|elles?|on|je|tu|nous|vous))$")

def cut (sWord):
    "returns a tuple of strings (prefix, trimed_word, suffix)"


    m = _zMotAvecPronom.search(sWord)
    if m:




        return ("", m.group(1), m.group(2))

    return ("", sWord, "")


# Other functions

def filterSugg (aSugg):
    "exclude suggestions"
    return filter(lambda sSugg: not sSugg.endswith(("è", "È")), aSugg)







(new version, lines 348-377)
    "pseudo", "pré", "re", "ré", "sans", "sous", "supra", "sur", "ultra"
])
aPfx2 = frozenset([
    "belgo", "franco", "génito", "gynéco", "médico", "russo"
])


_zWordPrefixes = re.compile("(?i)^([ldmtsnjcç]|lorsqu|presqu|jusqu|puisqu|quoiqu|quelqu|qu)[’'‘`]([\\w-]+)")
_zWordSuffixes = re.compile("(?i)^(\\w+)(-(?:t-|)(?:ils?|elles?|on|je|tu|nous|vous|ce))$")

def cut (sWord):
    "returns a tuple of strings (prefix, trimed_word, suffix)"
    sPrefix = ""
    sSuffix = ""
    m = _zWordPrefixes.search(sWord)
    if m:
        sPrefix = m.group(1) + "’"
        sWord = m.group(2)
    m = _zWordSuffixes.search(sWord)
    if m:
        sWord = m.group(1)
        sSuffix = m.group(2)
    return (sPrefix, sWord, sSuffix)


# Other functions

def filterSugg (aSugg):
    "exclude suggestions"
    return filter(lambda sSugg: not sSugg.endswith(("è", "È")), aSugg)
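
To illustrate what the rewritten cut() above gains over the old version (it now strips an elided prefix such as l', d', qu', lorsqu', and recognizes more verbal suffixes, case-insensitively), here is a standalone sketch reusing the same two regexes. cutDemo and the sample words are illustrative only.

import re

# Same patterns as _zWordPrefixes / _zWordSuffixes above.
_zPfx = re.compile("(?i)^([ldmtsnjcç]|lorsqu|presqu|jusqu|puisqu|quoiqu|quelqu|qu)[’'‘`]([\\w-]+)")
_zSfx = re.compile("(?i)^(\\w+)(-(?:t-|)(?:ils?|elles?|on|je|tu|nous|vous|ce))$")

def cutDemo (sWord):
    "illustrative copy of cut(): returns (prefix, word, suffix)"
    sPrefix, sSuffix = "", ""
    m = _zPfx.search(sWord)
    if m:
        sPrefix = m.group(1) + "’"
        sWord = m.group(2)
    m = _zSfx.search(sWord)
    if m:
        sWord = m.group(1)
        sSuffix = m.group(2)
    return (sPrefix, sWord, sSuffix)

print(cutDemo("qu’arrive-t-il"))   # ('qu’', 'arrive', '-t-il')
print(cutDemo("donne-moi"))        # ('', 'donne-moi', ''), "moi" is not in the suffix list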

Modified graphspell/dawg.py from [63684196d2] to [eb988983d4].

(old version, lines 10-23)


import sys
import os
import collections
import json
import time



from . import str_transform as st
from .progressbar import ProgressBar



def readFile (spf):







(new version, lines 10-25)


import sys
import os
import collections
import json
import time
import re
import traceback

from . import str_transform as st
from .progressbar import ProgressBar



def readFile (spf):
(old version, lines 37-101)
    """DIRECT ACYCLIC WORD GRAPH"""
    # This code is inspired from Steve Hanov’s DAWG, 2011. (http://stevehanov.ca/blog/index.php?id=115)
    # We store suffix/affix codes and tags within the graph after the “real” word.
    # A word is a list of numbers [ c1, c2, c3 . . . cN, iAffix, iTags]
    # Each arc is an index in self.lArcVal, where are stored characters, suffix/affix codes for stemming and tags.
    # Important: As usual, the last node (after ‘iTags’) is tagged final, AND the node after ‘cN’ is ALSO tagged final.

    def __init__ (self, src, cStemming, sLangCode, sLangName="", sDicName=""):
        print("===== Direct Acyclic Word Graph - Minimal Acyclic Finite State Automaton =====")
        cStemming = cStemming.upper()
        if cStemming == "A":
            funcStemmingGen = st.defineAffixCode
        elif cStemming == "S":
            funcStemmingGen = st.defineSuffixCode
        elif cStemming == "N":
            funcStemmingGen = st.noStemming
        else:
            raise ValueError("# Error. Unknown stemming code: {}".format(cStemming))

        aEntry = set()
        lChar = ['']; dChar = {}; nChar = 1; dCharOccur = {}
        lAff  = [];   dAff  = {}; nAff  = 0; dAffOccur = {}
        lTag  = [];   dTag  = {}; nTag  = 0; dTagOccur = {}
        nErr = 0








        # read lexicon
        if type(src) is str:
            iterable = readFile(src)
        else:
            iterable = src
        for sFlex, sStem, sTag in iterable:

            addWordToCharDict(sFlex)
            # chars
            for c in sFlex:
                if c not in dChar:
                    dChar[c] = nChar
                    lChar.append(c)
                    nChar += 1
                dCharOccur[c] = dCharOccur.get(c, 0) + 1
            # affixes to find stem from flexion
            sAff = funcStemmingGen(sFlex, sStem)
            if sAff not in dAff:
                dAff[sAff] = nAff
                lAff.append(sAff)
                nAff += 1
            dAffOccur[sAff] = dCharOccur.get(sAff, 0) + 1
            # tags
            if sTag not in dTag:
                dTag[sTag] = nTag
                lTag.append(sTag)
                nTag += 1
            dTagOccur[sTag] = dTagOccur.get(sTag, 0) + 1
            aEntry.add((sFlex, dAff[sAff], dTag[sTag]))
        if not aEntry:
            raise ValueError("# Error. Empty lexicon")
        
        # Preparing DAWG
        print(" > Preparing list of words")

        lVal = lChar + lAff + lTag
        lWord = [ [dChar[c] for c in sFlex] + [iAff+nChar] + [iTag+nChar+nAff]  for sFlex, iAff, iTag in aEntry ]
        aEntry = None
        
        # Dictionary of arc values occurrency, to sort arcs of each node
        dValOccur = dict( [ (dChar[c], dCharOccur[c])  for c in dChar ] \
                        + [ (dAff[aff]+nChar, dAffOccur[aff]) for aff in dAff ] \







(new version, lines 39-112)
    """DIRECT ACYCLIC WORD GRAPH"""
    # This code is inspired from Steve Hanov’s DAWG, 2011. (http://stevehanov.ca/blog/index.php?id=115)
    # We store suffix/affix codes and tags within the graph after the “real” word.
    # A word is a list of numbers [ c1, c2, c3 . . . cN, iAffix, iTags]
    # Each arc is an index in self.lArcVal, where are stored characters, suffix/affix codes for stemming and tags.
    # Important: As usual, the last node (after ‘iTags’) is tagged final, AND the node after ‘cN’ is ALSO tagged final.

    def __init__ (self, src, cStemming, sLangCode, sLangName="", sDicName="", sSelectFilterRegex=""):
        print("===== Direct Acyclic Word Graph - Minimal Acyclic Finite State Automaton =====")
        cStemming = cStemming.upper()
        if cStemming == "A":
            funcStemmingGen = st.defineAffixCode
        elif cStemming == "S":
            funcStemmingGen = st.defineSuffixCode
        elif cStemming == "N":
            funcStemmingGen = st.noStemming
        else:
            raise ValueError("# Error. Unknown stemming code: {}".format(cStemming))

        aEntry = set()
        lChar = ['']; dChar = {}; nChar = 1; dCharOccur = {}
        lAff  = [];   dAff  = {}; nAff  = 0; dAffOccur = {}
        lTag  = [];   dTag  = {}; nTag  = 0; dTagOccur = {}
        nErr = 0

        try:
            zFilter = re.compile(sSelectFilterRegex)  if sSelectFilterRegex  else None
        except:
            print(" # Error. Wrong filter regex. Filter ignored.")
            traceback.print_exc()
            zFilter = None

        # read lexicon
        if type(src) is str:
            iterable = readFile(src)
        else:
            iterable = src
        for sFlex, sStem, sTag in iterable:
            if not zFilter or zFilter.search(sTag):
                addWordToCharDict(sFlex)
                # chars
                for c in sFlex:
                    if c not in dChar:
                        dChar[c] = nChar
                        lChar.append(c)
                        nChar += 1
                    dCharOccur[c] = dCharOccur.get(c, 0) + 1
                # affixes to find stem from flexion
                sAff = funcStemmingGen(sFlex, sStem)
                if sAff not in dAff:
                    dAff[sAff] = nAff
                    lAff.append(sAff)
                    nAff += 1
                dAffOccur[sAff] = dCharOccur.get(sAff, 0) + 1
                # tags
                if sTag not in dTag:
                    dTag[sTag] = nTag
                    lTag.append(sTag)
                    nTag += 1
                dTagOccur[sTag] = dTagOccur.get(sTag, 0) + 1
                aEntry.add((sFlex, dAff[sAff], dTag[sTag]))
        if not aEntry:
            raise ValueError("# Error. Empty lexicon")
        
        # Preparing DAWG
        print(" > Preparing list of words")
        print(" Filter: " + (sSelectFilterRegex or "[None]"))
        lVal = lChar + lAff + lTag
        lWord = [ [dChar[c] for c in sFlex] + [iAff+nChar] + [iTag+nChar+nAff]  for sFlex, iAff, iTag in aEntry ]
        aEntry = None
        
        # Dictionary of arc values occurrency, to sort arcs of each node
        dValOccur = dict( [ (dChar[c], dCharOccur[c])  for c in dChar ] \
                        + [ (dAff[aff]+nChar, dAffOccur[aff]) for aff in dAff ] \

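The new sSelectFilterRegex parameter above lets the DAWG builder keep only lexicon entries whose tag matches a regular expression (on a bad pattern the real code prints an error with a traceback and ignores the filter). Below is a minimal Python sketch of that selection step outside the class; filterEntries, the sample entries and their tags are illustrative only.

import re

def filterEntries (lEntries, sSelectFilterRegex=""):
    "keep only (flexion, stem, tag) triples whose tag matches the filter, as in DAWG.__init__ above"
    try:
        zFilter = re.compile(sSelectFilterRegex)  if sSelectFilterRegex  else None
    except re.error:
        print(" # Error. Wrong filter regex. Filter ignored.")
        zFilter = None
    return [ t  for t in lEntries  if not zFilter or zFilter.search(t[2]) ]

lLexicon = [ ("chat", "chat", ":N:m:s"), ("chante", "chanter", ":V1:ip:3s"), ("belle", "beau", ":A:f:s") ]
print(filterEntries(lLexicon, ":V1"))   # keeps only the verb entry
print(filterEntries(lLexicon))          # no filter: everything is kept
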
Modified graphspell/ibdawg.py from [e811d4895f] to [a255097656].

(old version, lines 56-82)
                self.aSugg.add(sSugg)
                if nDist < self.nMinDist:
                    self.nMinDist = nDist
                self.nDistLimit = min(self.nDistLimit, self.nMinDist+2)

    def getSuggestions (self, nSuggLimit=10, nDistLimit=-1):
        "return a list of suggestions"
        lRes = []
        if self.dSugg[0]:
            # we sort the better results with the original word
            self.dSugg[0].sort(key=lambda sSugg: st.distanceDamerauLevenshtein(self.sWord, sSugg))

        for lSugg in self.dSugg.values():

            lRes.extend(lSugg)
            if len(lRes) > nSuggLimit:
                break
        lRes = list(cp.filterSugg(lRes))
        if self.sWord.istitle():
            lRes = list(map(lambda sSugg: sSugg.title(), lRes))
        elif self.sWord.isupper():
            lRes = list(map(lambda sSugg: sSugg.upper(), lRes))
        return lRes[:nSuggLimit]

    def reset (self):
        self.aSugg.clear()
        self.dSugg.clear()









(new version, lines 56-83)
                self.aSugg.add(sSugg)
                if nDist < self.nMinDist:
                    self.nMinDist = nDist
                self.nDistLimit = min(self.nDistLimit, self.nMinDist+2)

    def getSuggestions (self, nSuggLimit=10, nDistLimit=-1):
        "return a list of suggestions"

        if self.dSugg[0]:
            # we sort the better results with the original word
            self.dSugg[0].sort(key=lambda sSugg: st.distanceDamerauLevenshtein(self.sWord, sSugg))
        lRes = self.dSugg.pop(0)
        for nDist, lSugg in self.dSugg.items():
            if nDist <= self.nDistLimit:
                lRes.extend(lSugg)
                if len(lRes) > nSuggLimit:
                    break
        lRes = list(cp.filterSugg(lRes))
        if self.sWord.isupper():
            lRes = list(map(lambda sSugg: sSugg.upper(), lRes))
        elif self.sWord[0:1].isupper():
            lRes = list(map(lambda sSugg: sSugg[0:1].upper()+sSugg[1:], lRes))  # dont’ use <.istitle>
        return lRes[:nSuggLimit]

    def reset (self):
        self.aSugg.clear()
        self.dSugg.clear()
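
The reworked getSuggestions() above walks suggestions bucketed by edit distance, keeps only buckets within nDistLimit and stops once enough results are collected. Here is a simplified standalone sketch of that merge; the dictionary and sample data are illustrative, and the real method also re-sorts the distance-0 bucket against the original word before merging.

def mergeSuggestions (dSugg, nSuggLimit=10, nDistLimit=2):
    "flatten distance-bucketed suggestions, best distances first, as in SuggResult.getSuggestions above"
    lRes = dSugg.pop(0, [])
    for nDist in sorted(dSugg):
        if nDist > nDistLimit:
            break
        lRes.extend(dSugg[nDist])
        if len(lRes) > nSuggLimit:
            break
    return lRes[:nSuggLimit]

dDemo = { 0: ["chien"], 1: ["chiens", "chie"], 3: ["chienlit"] }
print(mergeSuggestions(dDemo, nSuggLimit=3, nDistLimit=2))   # ['chien', 'chiens', 'chie']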


(old version, lines 131-146)
            self.stem = self._stem3
            self._lookupArcNode = self._lookupArcNode3
            self._getArcs = self._getArcs3
            self._writeNodes = self._writeNodes3
        else:
            raise ValueError("  # Error: unknown code: {}".format(self.nCompressionMethod))

        self.bOptNumSigle = False
        self.bOptNumAtLast = False

    def _initBinary (self):
        "initialize with binary structure file"
        if self.by[0:17] != b"/grammalecte-fsa/":
            raise TypeError("# Error. Not a grammalecte-fsa binary dictionary. Header: {}".format(self.by[0:9]))
        if not(self.by[17:18] == b"1" or self.by[17:18] == b"2" or self.by[17:18] == b"3"):
            raise ValueError("# Error. Unknown dictionary version: {}".format(self.by[17:18]))







(new version, lines 132-147)
            self.stem = self._stem3
            self._lookupArcNode = self._lookupArcNode3
            self._getArcs = self._getArcs3
            self._writeNodes = self._writeNodes3
        else:
            raise ValueError("  # Error: unknown code: {}".format(self.nCompressionMethod))

        self.bAcronymValid = True
        self.bNumAtLastValid = False

    def _initBinary (self):
        "initialize with binary structure file"
        if self.by[0:17] != b"/grammalecte-fsa/":
            raise TypeError("# Error. Not a grammalecte-fsa binary dictionary. Header: {}".format(self.by[0:9]))
        if not(self.by[17:18] == b"1" or self.by[17:18] == b"2" or self.by[17:18] == b"3"):
            raise ValueError("# Error. Unknown dictionary version: {}".format(self.by[17:18]))
(old version, lines 228-264)
        sToken = cp.spellingNormalization(sToken)
        if self.isValid(sToken):
            return True
        if "-" in sToken:
            if sToken.count("-") > 4:
                return True
            return all(self.isValid(sWord)  for sWord in sToken.split("-"))


        return False

    def isValid (self, sWord):
        "checks if <sWord> is valid (different casing tested if the first letter is a capital)"
        if not sWord:
            return None
        if "’" in sWord: # ugly hack
            sWord = sWord.replace("’", "'")
        if self.lookup(sWord):
            return True
        if sWord.isdigit():
            return True
        if sWord[0:1].isupper():
            if len(sWord) > 1:
                if sWord.istitle():
                    return self.lookup(sWord.lower())
                if sWord.isupper():
                    if self.bOptNumSigle:
                        return True
                    return self.lookup(sWord.lower()) or self.lookup(sWord.capitalize())
                return self.lookup(sWord[:1].lower() + sWord[1:])
            else:
                return self.lookup(sWord.lower())


        return False

    def lookup (self, sWord):
        "returns True if <sWord> in dictionary (strict verification)"
        iAddr = 0
        for c in sWord:
            if c not in self.dChar:







(new version, lines 229-267)
        sToken = cp.spellingNormalization(sToken)
        if self.isValid(sToken):
            return True
        if "-" in sToken:
            if sToken.count("-") > 4:
                return True
            return all(self.isValid(sWord)  for sWord in sToken.split("-"))
        if "." in sToken or "·" in sToken:
            return True
        return False

    def isValid (self, sWord):
        "checks if <sWord> is valid (different casing tested if the first letter is a capital)"
        if not sWord:
            return None
        if "’" in sWord: # ugly hack
            sWord = sWord.replace("’", "'")
        if self.lookup(sWord):
            return True


        if sWord[0:1].isupper():
            if len(sWord) > 1:
                if sWord.istitle():
                    return self.lookup(sWord.lower())
                if sWord.isupper():
                    if self.bAcronymValid:
                        return True
                    return self.lookup(sWord.lower()) or self.lookup(sWord.capitalize())
                return self.lookup(sWord[:1].lower() + sWord[1:])
            else:
                return self.lookup(sWord.lower())
        if sWord[0:1].isdigit():
            return True
        return False

    def lookup (self, sWord):
        "returns True if <sWord> in dictionary (strict verification)"
        iAddr = 0
        for c in sWord:
            if c not in self.dChar:
(old version, lines 277-347)
            if sWord.isupper() and len(sWord) > 1:
                l.extend(self.morph(sWord.capitalize()))
        return l

    #@timethis
    def suggest (self, sWord, nSuggLimit=10):
        "returns a set of suggestions for <sWord>"

        sWord = cp.spellingNormalization(sWord)
        sPfx, sWord, sSfx = cp.cut(sWord)
        nMaxSwitch = max(len(sWord) // 3, 1)
        nMaxDel = len(sWord) // 5
        nMaxHardRepl = max((len(sWord) - 5) // 4, 1)

        oSuggResult = SuggResult(sWord)
        self._suggest(oSuggResult, sWord, nMaxSwitch=nMaxSwitch, nMaxDel=nMaxDel, nMaxHardRepl=nMaxHardRepl)
        if sWord.istitle():
            self._suggest(oSuggResult, sWord.lower(), nMaxSwitch=nMaxSwitch, nMaxDel=nMaxDel, nMaxHardRepl=nMaxHardRepl)
        elif sWord.islower():
            self._suggest(oSuggResult, sWord.title(), nMaxSwitch=nMaxSwitch, nMaxDel=nMaxDel, nMaxHardRepl=nMaxHardRepl)
        aSugg = oSuggResult.getSuggestions(nSuggLimit)
        if sSfx or sPfx:
            # we add what we removed
            return list(map(lambda sSug: sPfx + sSug + sSfx, aSugg))
        return aSugg

    def _suggest (self, oSuggResult, sRemain, nMaxSwitch=0, nMaxDel=0, nMaxHardRepl=0, nDeep=0, iAddr=0, sNewWord="", bAvoidLoop=False):
        # recursive function
        #logging.info((nDeep * "  ") + sNewWord + ":" + sRemain)
        if not sRemain:
            if int.from_bytes(self.byDic[iAddr:iAddr+self.nBytesArc], byteorder='big') & self._finalNodeMask:
                oSuggResult.addSugg(sNewWord, nDeep)
            for sTail in self._getTails(iAddr):
                oSuggResult.addSugg(sNewWord+sTail, nDeep)
            return


        cCurrent = sRemain[0:1]
        for cChar, jAddr in self._getCharArcs(iAddr):
            if cChar in cp.d1to1.get(cCurrent, cCurrent):
                self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, jAddr, sNewWord+cChar)
            elif not bAvoidLoop and nMaxHardRepl:

                self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl-1, nDeep+1, jAddr, sNewWord+cChar, True)


        if not bAvoidLoop: # avoid infinite loop
            if len(sRemain) > 1:
                if cCurrent == sRemain[1:2]:
                    # same char, we remove 1 char without adding 1 to <sNewWord>
                    self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord)
                else:
                    # switching chars
                    if nMaxSwitch:
                        self._suggest(oSuggResult, sRemain[1:2]+sRemain[0:1]+sRemain[2:], nMaxSwitch-1, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True)
                    # delete char
                    if nMaxDel:
                        self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel-1, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True)
                # Phonetic replacements
                for sRepl in cp.get1toXReplacement(sNewWord[-1:], cCurrent, sRemain[1:2]):
                    self._suggest(oSuggResult, sRepl + sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True)
                for sRepl in cp.d2toX.get(sRemain[0:2], ()):
                    self._suggest(oSuggResult, sRepl + sRemain[2:], nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True)
            # end of word
            if len(sRemain) == 2:
                for sRepl in cp.dFinal2.get(sRemain, ()):
                    self._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True)
            elif len(sRemain) == 1:
                self._suggest(oSuggResult, "", nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True) # remove last char and go on
                for sRepl in cp.dFinal1.get(sRemain, ()):
                    self._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, True)

    #@timethis
    def suggest2 (self, sWord, nMaxSugg=10):
        "returns a set of suggestions for <sWord>"
        sWord = cp.spellingNormalization(sWord)
        sPfx, sWord, sSfx = cp.cut(sWord)
        oSuggResult = SuggResult(sWord)







(new version, lines 280-353)
            if sWord.isupper() and len(sWord) > 1:
                l.extend(self.morph(sWord.capitalize()))
        return l

    #@timethis
    def suggest (self, sWord, nSuggLimit=10):
        "returns a set of suggestions for <sWord>"
        sWord = sWord.rstrip(".")   # useful for LibreOffice
        sWord = cp.spellingNormalization(sWord)
        sPfx, sWord, sSfx = cp.cut(sWord)
        nMaxSwitch = max(len(sWord) // 3, 1)
        nMaxDel = len(sWord) // 5
        nMaxHardRepl = max((len(sWord) - 5) // 4, 1)
        nMaxJump = max(len(sWord) // 4, 1)
        oSuggResult = SuggResult(sWord)
        self._suggest(oSuggResult, sWord, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump)




        aSugg = oSuggResult.getSuggestions(nSuggLimit)
        if sSfx or sPfx:
            # we add what we removed
            return list(map(lambda sSug: sPfx + sSug + sSfx, aSugg))
        return aSugg

    def _suggest (self, oSuggResult, sRemain, nMaxSwitch=0, nMaxDel=0, nMaxHardRepl=0, nMaxJump=0, nDist=0, nDeep=0, iAddr=0, sNewWord="", bAvoidLoop=False):
        # recursive function
        #logging.info((nDeep * "  ") + sNewWord + ":" + sRemain)
        if not sRemain:
            if int.from_bytes(self.byDic[iAddr:iAddr+self.nBytesArc], byteorder='big') & self._finalNodeMask:
                oSuggResult.addSugg(sNewWord, nDeep)
            for sTail in self._getTails(iAddr):
                oSuggResult.addSugg(sNewWord+sTail, nDeep)
            return
        if nDist > oSuggResult.nDistLimit:
            return
        cCurrent = sRemain[0:1]
        for cChar, jAddr in self._getCharArcs(iAddr):
            if cChar in cp.d1to1.get(cCurrent, cCurrent):
                self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, jAddr, sNewWord+cChar)
            elif not bAvoidLoop:
                if nMaxHardRepl:
                    self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl-1, nMaxJump, nDist+1, nDeep+1, jAddr, sNewWord+cChar, True)
                if nMaxJump:
                    self._suggest(oSuggResult, sRemain, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump-1, nDist+1, nDeep+1, jAddr, sNewWord+cChar, True)
        if not bAvoidLoop: # avoid infinite loop
            if len(sRemain) > 1:
                if cCurrent == sRemain[1:2]:
                    # same char, we remove 1 char without adding 1 to <sNewWord>
                    self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord)
                else:
                    # switching chars
                    if nMaxSwitch:
                        self._suggest(oSuggResult, sRemain[1:2]+sRemain[0:1]+sRemain[2:], nMaxSwitch-1, nMaxDel, nMaxHardRepl, nMaxJump, nDist+1, nDeep+1, iAddr, sNewWord, True)
                    # delete char
                    if nMaxDel:
                        self._suggest(oSuggResult, sRemain[1:], nMaxSwitch, nMaxDel-1, nMaxHardRepl, nMaxJump, nDist+1, nDeep+1, iAddr, sNewWord, True)
                # Phonetic replacements
                for sRepl in cp.get1toXReplacement(sNewWord[-1:], cCurrent, sRemain[1:2]):
                    self._suggest(oSuggResult, sRepl + sRemain[1:], nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, True)
                for sRepl in cp.d2toX.get(sRemain[0:2], ()):
                    self._suggest(oSuggResult, sRepl + sRemain[2:], nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, True)
            # end of word
            if len(sRemain) == 2:
                for sRepl in cp.dFinal2.get(sRemain, ()):
                    self._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, True)
            elif len(sRemain) == 1:
                self._suggest(oSuggResult, "", nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, True) # remove last char and go on
                for sRepl in cp.dFinal1.get(sRemain, ()):
                    self._suggest(oSuggResult, sRepl, nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump, nDist, nDeep+1, iAddr, sNewWord, True)

    #@timethis
    def suggest2 (self, sWord, nMaxSugg=10):
        "returns a set of suggestions for <sWord>"
        sWord = cp.spellingNormalization(sWord)
        sPfx, sWord, sSfx = cp.cut(sWord)
        oSuggResult = SuggResult(sWord)
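
The reworked suggest() above derives all of its edit budgets (switches, deletions, hard replacements, jumps) from the length of the misspelled word before starting the recursive _suggest() walk. A minimal standalone sketch of that arithmetic; the helper name and the sample words are illustrative only and not part of Grammalecte:

# Sketch of the budget arithmetic used by the new suggest(); the helper
# name and the sample words are illustrative, not taken from Grammalecte.
def _budgets (sWord):
    nMaxSwitch = max(len(sWord) // 3, 1)
    nMaxDel = len(sWord) // 5
    nMaxHardRepl = max((len(sWord) - 5) // 4, 1)
    nMaxJump = max(len(sWord) // 4, 1)
    return nMaxSwitch, nMaxDel, nMaxHardRepl, nMaxJump

for sWord in ("chat", "ortografe", "anticonstitutionnellement"):
    print(sWord, _budgets(sWord))
# chat -> (1, 0, 1, 1) ; ortografe -> (3, 1, 1, 2) ; anticonstitutionnellement -> (8, 5, 5, 6)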

Modified graphspell/spellchecker.py from [696f7480ec] to [cbd22d2c4d].

import traceback

from . import ibdawg
from . import tokenizer


dDefaultDictionaries = {
    "fr": "fr.bdic",
    "en": "en.bdic"
}


class SpellChecker ():

    def __init__ (self, sLangCode, sfMainDic="", sfExtendedDic="", sfCommunityDic="", sfPersonalDic=""):

import traceback

from . import ibdawg
from . import tokenizer


dDefaultDictionaries = {
    "fr": "fr-allvars.bdic",
    "en": "en.bdic"
}


class SpellChecker ():

    def __init__ (self, sLangCode, sfMainDic="", sfExtendedDic="", sfCommunityDic="", sfPersonalDic=""):
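
The change here is the default French dictionary, which becomes fr-allvars.bdic. A hedged sketch of how a caller might resolve the main dictionary name from the language code when none is passed explicitly; the helper below is illustrative and not an actual Grammalecte API:

# Illustrative only: mirrors dDefaultDictionaries from spellchecker.py.
dDefaultDictionaries = {
    "fr": "fr-allvars.bdic",
    "en": "en.bdic"
}

def getDefaultDictionaryName (sLangCode, sfMainDic=""):
    "hypothetical helper: fall back on the per-language default when no main dictionary is given"
    return sfMainDic if sfMainDic else dDefaultDictionaries.get(sLangCode, "")

print(getDefaultDictionaryName("fr"))              # fr-allvars.bdic
print(getDefaultDictionaryName("fr", "fr.bdic"))   # fr.bdic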

Modified js_extension/string.js from [34840fe9fe] to [aea1fc20a2].

            iPos += nStep;
        }
        return nOccur;
    };
    String.prototype.gl_isDigit = function () {
        return (this.search(/^[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]+$/) !== -1);
    };



    String.prototype.gl_isLowerCase = function () {
        return (this.search(/^[a-zà-öø-ÿ0-9-]+$/) !== -1);
    };
    String.prototype.gl_isUpperCase = function () {
        return (this.search(/^[A-ZÀ-ÖØ-ߌ0-9-]+$/) !== -1);
    };
    String.prototype.gl_isTitle = function () {

            iPos += nStep;
        }
        return nOccur;
    };
    String.prototype.gl_isDigit = function () {
        return (this.search(/^[0-9⁰¹²³⁴⁵⁶⁷⁸⁹]+$/) !== -1);
    };
    String.prototype.gl_isAlpha = function () {
        return (this.search(/^[a-zA-Zà-öÀ-Öø-ÿØ-ßĀ-ʯ]+$/) !== -1);
    };
    String.prototype.gl_isLowerCase = function () {
        return (this.search(/^[a-zà-öø-ÿ0-9-]+$/) !== -1);
    };
    String.prototype.gl_isUpperCase = function () {
        return (this.search(/^[A-ZÀ-ÖØ-ߌ0-9-]+$/) !== -1);
    };
    String.prototype.gl_isTitle = function () {
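
The new String.prototype.gl_isAlpha() checks a fixed Latin character class. A rough Python check of the same character class, for illustration only (the real implementation is the JavaScript above):

import re

# Approximate Python counterpart of the gl_isAlpha() regex, for illustration only.
zAlpha = re.compile("^[a-zA-Zà-öÀ-Öø-ÿØ-ßĀ-ʯ]+$")

for s in ("été", "cœur", "Grammalecte", "mot-clé", "abc123"):
    print(s, bool(zAlpha.match(s)))
# "été", "cœur" and "Grammalecte" match; "mot-clé" (hyphen) and "abc123" (digits) do not.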

Modified lex_build.py from [2d1c4b9aa4] to [346704203c].

#!python3

# Lexicon builder

import argparse
from distutils import dir_util

import graphspell.dawg as fsa
from graphspell.ibdawg import IBDAWG


def build (spfSrc, sLangCode, sLangName, sfDict, bJSON=False, sDicName="", cStemmingMethod="S", nCompressMethod=1):
    "transform a text lexicon as a binary indexable dictionary"
    oDAWG = fsa.DAWG(spfSrc, cStemmingMethod, sLangCode, sLangName, sDicName)
    dir_util.mkpath("graphspell/_dictionaries")
    oDAWG.writeInfo("graphspell/_dictionaries/" + sfDict + ".info.txt")
    oDAWG.writeBinary("graphspell/_dictionaries/" + sfDict + ".bdic", int(nCompressMethod))
    if bJSON:
        dir_util.mkpath("graphspell-js/_dictionaries")
        oDic = IBDAWG(sfDict + ".bdic")
        oDic.writeAsJSObject("graphspell-js/_dictionaries/" + sfDict + ".json", bBinaryDictAsHexString=True)

#!python3

# Lexicon builder

import argparse
from distutils import dir_util

import graphspell.dawg as fsa
from graphspell.ibdawg import IBDAWG


def build (spfSrc, sLangCode, sLangName, sfDict, bJSON=False, sDicName="", sFilter="", cStemmingMethod="S", nCompressMethod=1):
    "transform a text lexicon as a binary indexable dictionary"
    oDAWG = fsa.DAWG(spfSrc, cStemmingMethod, sLangCode, sLangName, sDicName, sFilter)
    dir_util.mkpath("graphspell/_dictionaries")
    oDAWG.writeInfo("graphspell/_dictionaries/" + sfDict + ".info.txt")
    oDAWG.writeBinary("graphspell/_dictionaries/" + sfDict + ".bdic", int(nCompressMethod))
    if bJSON:
        dir_util.mkpath("graphspell-js/_dictionaries")
        oDic = IBDAWG(sfDict + ".bdic")
        oDic.writeAsJSObject("graphspell-js/_dictionaries/" + sfDict + ".json", bBinaryDictAsHexString=True)
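
build() gains an sFilter argument, forwarded to the DAWG constructor so that a single source lexicon can yield several filtered dictionaries. A hedged usage sketch; the paths, dictionary name and empty filter value below are placeholders, not the project's actual settings:

import lex_build

# Hypothetical call: paths, name and filter value are placeholders.
lex_build.build("lexicons/French.lex", "fr", "French", "fr-allvars",
                bJSON=False, sDicName="Dictionnaire français", sFilter="",
                cStemmingMethod="S", nCompressMethod=1)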

Modified lexicons/French.lex from [305d84974d] to [168faf793a].

more than 10,000 changes

Modified make.py from [cd8c44402d] to [834b678ac5].

    dVars["dic_main_filename_js"] = ""
    dVars["dic_extended_filename_py"] = ""
    dVars["dic_extended_filename_js"] = ""
    dVars["dic_community_filename_py"] = ""
    dVars["dic_community_filename_js"] = ""
    dVars["dic_personal_filename_py"] = ""
    dVars["dic_personal_filename_js"] = ""
    lDict = [ ("main", dVars['dic_filename']) ]
    if bExtendedDict:
        lDict.append(("extended", dVars['dic_extended_filename']))
    if bCommunityDict:
        lDict.append(("community", dVars['dic_community_filename']))
    if bPersonalDict:
        lDict.append(("personal", dVars['dic_personal_filename']))
    for sType, sFileName in lDict:
        spfPyDic = "graphspell/_dictionaries/" + sFileName + ".bdic"
        spfJSDic = "graphspell-js/_dictionaries/" + sFileName + ".json"
        if not os.path.isfile(spfPyDic) or (bJavaScript and not os.path.isfile(spfJSDic)):
            buildDictionary(dVars, sType, bJavaScript)
        print(spfPyDic)
        file_util.copy_file(spfPyDic, "grammalecte/graphspell/_dictionaries")
        dVars['dic_'+sType+'_filename_py'] = sFileName + '.bdic'
        if bJavaScript:

            file_util.copy_file(spfJSDic, "grammalecte-js/graphspell/_dictionaries")
            dVars['dic_'+sType+'_filename_js'] = sFileName + '.json'




def buildDictionary (dVars, sType, bJavaScript=False):
    if sType == "main":
        spfLexSrc = dVars['lexicon_src']
        sfDictDst = dVars['dic_filename']
        sDicName = dVars['dic_name']




    elif sType == "extended":
        spfLexSrc = dVars['lexicon_extended_src']
        sfDictDst = dVars['dic_extended_filename']
        sDicName = dVars['dic_extended_name']
    elif sType == "community":
        spfLexSrc = dVars['lexicon_community_src']
        sfDictDst = dVars['dic_community_filename']
        sDicName = dVars['dic_community_name']
    elif sType == "personal":
        spfLexSrc = dVars['lexicon_personal_src']
        sfDictDst = dVars['dic_personal_filename']
        sDicName = dVars['dic_personal_name']
    lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, dVars['stemming_method'], int(dVars['fsa_method']))



def main ():
    print("Python: " + sys.version)
    xParser = argparse.ArgumentParser()
    xParser.add_argument("lang", type=str, nargs='+', help="lang project to generate (name of folder in /lang)")

    dVars["dic_main_filename_js"] = ""
    dVars["dic_extended_filename_py"] = ""
    dVars["dic_extended_filename_js"] = ""
    dVars["dic_community_filename_py"] = ""
    dVars["dic_community_filename_js"] = ""
    dVars["dic_personal_filename_py"] = ""
    dVars["dic_personal_filename_js"] = ""
    lDict = [ ("main", s)  for s in dVars['dic_filenames'].split(",") ]
    if bExtendedDict:
        lDict.append(("extended", dVars['dic_extended_filename']))
    if bCommunityDict:
        lDict.append(("community", dVars['dic_community_filename']))
    if bPersonalDict:
        lDict.append(("personal", dVars['dic_personal_filename']))
    for sType, sFileName in lDict:
        spfPyDic = "graphspell/_dictionaries/" + sFileName + ".bdic"
        spfJSDic = "graphspell-js/_dictionaries/" + sFileName + ".json"
        if not os.path.isfile(spfPyDic) or (bJavaScript and not os.path.isfile(spfJSDic)):
            buildDictionary(dVars, sType, bJavaScript)
        print(spfPyDic)
        file_util.copy_file(spfPyDic, "grammalecte/graphspell/_dictionaries")
        dVars['dic_'+sType+'_filename_py'] = sFileName + '.bdic'
        if bJavaScript:
            print(spfJSDic)
            file_util.copy_file(spfJSDic, "grammalecte-js/graphspell/_dictionaries")
            dVars['dic_'+sType+'_filename_js'] = sFileName + '.json'
    dVars['dic_main_filename_py'] = dVars['dic_default_filename_py'] + ".bdic"
    dVars['dic_main_filename_js'] = dVars['dic_default_filename_js'] + ".json"


def buildDictionary (dVars, sType, bJavaScript=False):
    if sType == "main":
        spfLexSrc = dVars['lexicon_src']
        l_sfDictDst = dVars['dic_filenames'].split(",")
        l_sDicName = dVars['dic_name'].split(",")
        l_sFilter = dVars['dic_filter'].split(",")
        for sfDictDst, sDicName, sFilter in zip(l_sfDictDst, l_sDicName, l_sFilter):
            lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, sFilter, dVars['stemming_method'], int(dVars['fsa_method']))
    else:
        if sType == "extended":
            spfLexSrc = dVars['lexicon_extended_src']
            sfDictDst = dVars['dic_extended_filename']
            sDicName = dVars['dic_extended_name']
        elif sType == "community":
            spfLexSrc = dVars['lexicon_community_src']
            sfDictDst = dVars['dic_community_filename']
            sDicName = dVars['dic_community_name']
        elif sType == "personal":
            spfLexSrc = dVars['lexicon_personal_src']
            sfDictDst = dVars['dic_personal_filename']
            sDicName = dVars['dic_personal_name']
        lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, "", dVars['stemming_method'], int(dVars['fsa_method']))



def main ():
    print("Python: " + sys.version)
    xParser = argparse.ArgumentParser()
    xParser.add_argument("lang", type=str, nargs='+', help="lang project to generate (name of folder in /lang)")
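
In make.py, the "main" dictionary is no longer a single file: dic_filenames, dic_name and dic_filter are now read as comma-separated, position-aligned lists and zipped together, one lex_build.build() call per entry. A rough sketch of that pairing; the dictionary names and filter values below are invented for illustration (only fr-allvars appears elsewhere in this check-in):

# Illustrative values only: the real ones come from the language's config.
dVars = {
    "dic_filenames": "fr-classic,fr-reform,fr-allvars",
    "dic_name": "Classique,Réforme 1990,Toutes variantes",
    "dic_filter": ",,"
}

l_sfDictDst = dVars["dic_filenames"].split(",")
l_sDicName = dVars["dic_name"].split(",")
l_sFilter = dVars["dic_filter"].split(",")

for sfDictDst, sDicName, sFilter in zip(l_sfDictDst, l_sDicName, l_sFilter):
    # each triplet would be passed to lex_build.build(...)
    print(sfDictDst, "|", sDicName, "|", repr(sFilter))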