Grammalecte: Check-in [261ec1c90b]

Overview
Comment: [build][core][fr] rename function
SHA3-256: 261ec1c90bb719b1049b51d84b7c5393c7069d5d829971d470c5d39d9b84b8ee
User & Date: olr on 2020-11-26 12:30:38
Context
2020-11-27 09:52  [core] tests: extend neutralization for spellchecker  check-in: f667bcada7  user: olr  tags: trunk, core
2020-11-26 12:30  [build][core][fr] rename function  check-in: 261ec1c90b  user: olr  tags: trunk, fr, core, build
2020-11-26 07:12  [build][core][fr][doc] rename functions  check-in: b316995bfc  user: olr  tags: trunk, fr, core, build, doc
Changes

Modified compile_rules_graph.py from [7091a2b5a1] to [ae796c907e].

@@ -42,16 +42,16 @@
    sCode = re.sub(r"\b(morph0?|morphVC|value|tag|meta|info)[(]\\-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    sCode = re.sub(r"\b(select|exclude|define|definefrom|rewrite|addmorph|setmeta)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode)
    sCode = re.sub(r"\b(select|exclude|define|definefrom|rewrite|addmorph|setmeta)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    sCode = re.sub(r"\b(tagbefore|tagafter)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2], dTags', sCode)
    sCode = re.sub(r"\b(tagbefore|tagafter)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1], dTags', sCode)
    sCode = re.sub(r"\bspace[(][\\](\d+)", 'g_space(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"\bspace[(][\\]-(\d+)", 'g_space(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"\banalyse_with_next[(][\\](\d+)", 'g_merged_analyse(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"\banalyse_with_next[(][\\]-(\d+)", 'g_merged_analyse(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"\bmorph2[(][\\](\d+)", 'g_morph2(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"\bmorph2[(][\\]-(\d+)", 'g_morph2(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"\b(morph0?|tag|meta|value)\(>1", 'g_\\1(lToken[nLastToken+1]', sCode)                      # next token
    sCode = re.sub(r"\b(morph0?|tag|meta|value)\(<1", 'g_\\1(lToken[nTokenOffset]', sCode)                      # previous token
    sCode = re.sub(r"\b(morph0?|tag|meta|value)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', sCode)       # next token
    sCode = re.sub(r"\b(morph0?|tag|meta|value)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', sCode)   # previous token
    sCode = re.sub(r"\bspace[(](>1)", 'g_space(lToken[nLastToken+1], g_token(lToken, nLastToken+2)', sCode)     # next token
    sCode = re.sub(r"\bspace[(](<1)", 'g_space(lToken[nTokenOffset], lToken[nTokenOffset+1]', sCode)            # previous token
    sCode = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', sCode)

Modified gc_core/js/lang_core/gc_functions.js from [56e86460e7] to [1f04b155c5].

@@ -265,15 +265,15 @@
            }
        }
    }
    // search sPattern
    return lMorph.some(sMorph  =>  (sMorph.search(sPattern) !== -1));
}

-function g_merged_analyse (oToken1, oToken2, cMerger, sPattern, sNegPattern="", bSetMorph=true) {
+function g_morph2 (oToken1, oToken2, cMerger, sPattern, sNegPattern="", bSetMorph=true) {
    // merge two token values, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)
    let lMorph = gc_engine.oSpellChecker.getMorph(oToken1["sValue"] + cMerger + oToken2["sValue"]);
    if (lMorph.length == 0) {
        return false;
    }
    // check negative condition
    if (sNegPattern) {

Modified gc_core/py/lang_core/gc_functions.py from [5fef80fdf0] to [fb2b86ce70].

@@ -219,15 +219,15 @@
        if any(zNegPattern.search(sMorph)  for sMorph in lMorph):
            return False
    # search sPattern
    zPattern = re.compile(sPattern)
    return any(zPattern.search(sMorph)  for sMorph in lMorph)


-def g_merged_analyse (dToken1, dToken2, cMerger, sPattern, sNegPattern="", bSetMorph=True):
+def g_morph2 (dToken1, dToken2, cMerger, sPattern, sNegPattern="", bSetMorph=True):
    "merge two token values, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)"
    lMorph = _oSpellChecker.getMorph(dToken1["sValue"] + cMerger + dToken2["sValue"])
    if not lMorph:
        return False
    # check negative condition
    if sNegPattern:
        if sNegPattern == "*":

Modified gc_lang/fr/rules.grx from [5ff861bc84] to [bcfa5485c2].

@@ -2764,15 +2764,15 @@
        <<- =>> define(\1, ":N:e:i")

    Mai 68
        <<- ~>> ␣
        <<- =>> define(\1, ":MP:m:i")

    ~^[A-ZÀÂÉÈÊÎÔ].  ~^[A-ZÀÂÉÈÊÎÔ].
-        <<- analyse_with_next(\1, " ", ":") ~>> ␣
+        <<- morph2(\1, " ", ":") ~>> ␣
        <<- __else__ and morph(\1, ":M") and morph(\2, ":V", ":[GM]") =>> define(\2, ":M2")

    Me  ~^[A-ZÀÂÉÈÊÎÔ].
        <<- =>> define(\1, ":T")
        <<- ~1>> *

    [la|cette|the]  ~^[A-ZÀÂÉÈÊÎÔ].  ?~^[A-ZÀÂÉÈÊÎÔ].¿  ?~^[A-ZÀÂÉÈÊÎÔ].¿  [Administration|Area|Army|Assocation|Avenue|Chamber|Church|City|Community|Court|Company|Corporation|Cup|Enterprise|Foundation|Fraternity|Gallery|Highway|House|Initiative|League|Library|Mansion|Nation|Navy|Organization|Reserve|Road|River|Sea|Society|Station|Sorority|Tavern|Tower|University|Valley]
@@ -5083,36 +5083,36 @@
TEST: des {{franco américains}}
TEST: {{franco américaine}}                             ->> franco-américaine
TEST: l’{{israélo belge}}


__tu_préfixe_xxxo__
    [macro|magnéto|micro|paléo|rétro|rhino|stéréo]  *WORD
-        <<- /tu/ analyse_with_next(\1, "-", ":")
+        <<- /tu/ morph2(\1, "-", ":")
        ->> \1-\2                                                                                   && S’il s’agit d’un seul mot, il manque un trait d’union.

    [électro|ferro|hydro|labio|médico|nano|néo|neuro|physico|politico|sino|socio]  *WORD
-        <<- /tu/ space(\1, 1, 1) and (morph(\2, ":N") or analyse_with_next(\1, "-", ":"))
+        <<- /tu/ space(\1, 1, 1) and (morph(\2, ":N") or morph2(\1, "-", ":"))
        ->> \1-\2                                                                                   && S’il s’agit d’un seul mot, il manque un trait d’union.

TEST: {{ferro électrique}}                              ->> ferro-électrique
TEST: {{rétro ingénierie}}.                             ->> rétro-ingénierie


__tu_préfixe_divers__
    [anti|auto|arrière|avant|demi|extra|intra|multi|post]  *WORD
        <<- /tu/ morph(<1, ":D|<start>|>,") and analyse_with_next(\1, "-", ":")
        <<- /tu/ morph(<1, ":D|<start>|>,") and morph2(\1, "-", ":")
        ->> \1-\2                                                                                   && Il manque probablement un trait d’union.

    [non|sans]  *WORD
        <<- /tu/ morph(<1, ":D") and analyse_with_next(\1, "-", ":")
        <<- /tu/ morph(<1, ":D") and morph2(\1, "-", ":")
        ->> \1-\2                                                                                   && Il manque probablement un trait d’union.

    sous  *WORD
        <<- /tu/ not(\2 == "forme" and value(>1, "|de|d’|")) and morph(<1, ":D") and analyse_with_next(\1, "-", ":")
        <<- /tu/ not(\2 == "forme" and value(>1, "|de|d’|")) and morph(<1, ":D") and morph2(\1, "-", ":")
        ->> \1-\2                                                                                   && Il manque probablement un trait d’union.

TEST: il a pris une balle dans l’{{arrière train}}.
TEST: Ce {{sans gêne}} mérite une bonne leçon
TEST: une {{sous culture}} passée de mode
TEST: l’{{avant train}}
TEST: l’{{arrière pensée}}
@@ -5134,28 +5134,28 @@
TEST: il n’avait contracté qu’un {{pseudo mariage}}.
TEST: elle connaissait de {{cet}} pseudo-prêtresse uniquement de nom.
TEST: la {{pseudo taxe}} carbone


__tu_mots_composés_verbe_nom__
    [contre|entre]  *WORD
        <<- /tu/ not morph(\2, ":[GYB]") and morph(<1, ":(?:D|V0e)|<start>|>,") and analyse_with_next(\1, "-", ":N")
        <<- /tu/ not morph(\2, ":[GYB]") and morph(<1, ":(?:D|V0e)|<start>|>,") and morph2(\1, "-", ":N")
        ->> \1-\2                                                                                   && Il manque probablement un trait d’union.

    [m’|t’|s’] entre *WORD
        <<- /tu/ morph(\3, ":V") and analyse_with_next(\2, "-", ":V")
        <<- /tu/ morph(\3, ":V") and morph2(\2, "-", ":V")
        -2:3>> \2-\3                                                                                && Il manque probablement un trait d’union.

    nous nous entre ~on[ts]$
    vous vous entre ~e[zr]$
        <<- /tu/ morph(\4, ":V") and analyse_with_next(\3, "-", ":V") and not morph(<1, ":R")
        <<- /tu/ morph(\4, ":V") and morph2(\3, "-", ":V") and not morph(<1, ":R")
        -3:4>> \3-\4                                                                                && Il manque probablement un trait d’union.

    [attrape|garde|porte|brise|cache|casse|chauffe|coupe|cure|croque|essuie|lance|lave|lève|marque|pare|passe|perce|pèse|porte|poste|pousse|presse|protège|ramasse|serre|taille|tire|tourne|traîne|traine|vide]  *WORD
        <<- /tu/ morph(<1, ":(?:D|V0e)|<start>|>,") and analyse_with_next(\1, "-", ":N")
        <<- /tu/ morph(<1, ":(?:D|V0e)|<start>|>,") and morph2(\1, "-", ":N")
        ->> \1-\2                                                                                   && Il manque probablement un trait d’union.

TEST: c’est le {{contre exemple}} parfait
TEST: une {{entre voie}}
TEST: s’{{entre regarder}}
TEST: ce sont des {{lève tard}}.
TEST: nous nous {{entre tuions}}
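At the rule level, the renamed condition keeps the same role: `morph2(\1, "-", ":")` asks whether the two matched tokens, joined by a hyphen, form a known word, and the `->> \1-\2` action then proposes the hyphenated form, as in the TEST sentences of the hunks above. A minimal sketch of that behaviour (the mini-lexicon and helper are invented for the example; the real check goes through the spellchecker's morphology lookup):

import re

# Invented mini-lexicon standing in for the spellchecker's morphology lookup.
dLexicon = { "rétro-ingénierie": [":N:f:s"], "ferro-électrique": [":A:e:s"] }

def bMergedWithHyphen (sWord1, sWord2, sPattern):
    "hypothetical equivalent of the rule condition morph2(\\1, '-', sPattern)"
    lMorph = dLexicon.get(sWord1 + "-" + sWord2, [])
    return any(re.search(sPattern, sMorph)  for sMorph in lMorph)

for sA, sB in [("rétro", "ingénierie"), ("ferro", "électrique"), ("rétro", "wagon")]:
    if bMergedWithHyphen(sA, sB, ":"):
        print(f"{sA} {sB}  ->>  {sA}-{sB}")   # the rule would suggest the hyphenated form
# rétro ingénierie  ->>  rétro-ingénierie
# ferro électrique  ->>  ferro-électrique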

Modified misc/grammalecte.sublime-syntax from [575daf5fb0] to [215e477290].

@@ -56,15 +56,15 @@
    # other.
    - match: '\b(?:if|else|and|or|not|in)\b'
      scope: keyword.python

    - match: '\b(?:True|False|None)\b'
      scope: constant.language

-    - match: '\b(?:spell|morph(?:VC|)|stem|tag|value|space|textarea0?\w*|before0?\w*|after0?\w*|word|option|define(?:from|)|select|exclude|setmeta|analyse|tag(?:after|before)|apposition|is[A-Z]\w+|agreement|rewrite|checkD\w+|getD\w+|has[A-Z]\w+|sugg[A-Z]\w+|switch[A-Z]\w+|ceOrCet|formatN\w+|mbUnit)\b'
+    - match: '\b(?:spell|morph(?:VC|0|2|)|stem|tag|value|space|textarea0?\w*|before0?\w*|after0?\w*|word|option|define(?:from|)|select|exclude|setmeta|analyse|tag(?:after|before)|apposition|is[A-Z]\w+|agreement|rewrite|checkD\w+|getD\w+|has[A-Z]\w+|sugg[A-Z]\w+|switch[A-Z]\w+|ceOrCet|formatN\w+|mbUnit)\b'
      scope: entity.name.function

    - match: '\b(?:replace|endswith|startswith|search|upper|lower|capitalize|strip|rstrip|is(?:alpha|upper|lower|digit|title))\b'
      scope: support.function

    - match: '\becho\b'
      scope: support.function.debug
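The extended alternation means that `morph0` and `morph2` are now highlighted as function names in the Sublime Text syntax, alongside `morph` and `morphVC`. A quick check of the relevant part of the pattern (truncated to the `morph` group for brevity):

import re

# Truncated excerpt of the updated function-name pattern from grammalecte.sublime-syntax.
zFunc = re.compile(r'\b(?:spell|morph(?:VC|0|2|))\b')

for sName in ("morph", "morph0", "morph2", "morphVC", "morph3"):
    print(sName, bool(zFunc.search(sName)))
# morph True / morph0 True / morph2 True / morphVC True / morph3 False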