Index: gc_core/js/text.js
==================================================================
--- gc_core/js/text.js
+++ gc_core/js/text.js
@@ -9,16 +9,16 @@
     var helpers = require("resource://grammalecte/helpers.js");
 }
 
 
 var text = {
-    getParagraph: function* (sText) {
+    getParagraph: function* (sText, sSepParagraph = "\n") {
         // generator: returns paragraphs of text
         let iStart = 0;
         let iEnd = 0;
         sText = sText.replace("\r\n", "\n").replace("\r", "\n");
-        while ((iEnd = sText.indexOf("\n", iStart)) !== -1) {
+        while ((iEnd = sText.indexOf(sSepParagraph, iStart)) !== -1) {
             yield sText.slice(iStart, iEnd);
-            iStart = iEnd + 1;
+            iStart = iEnd + sSepParagraph.length;
         }
         yield sText.slice(iStart);
     },
@@ -43,11 +43,11 @@
     },
 
     getReadableError: function (oErr) {
         // Returns an error oErr as a readable error
         try {
-            let sResult = "\n* " + oErr['nStart'] + ":" + oErr['nEnd'] 
+            let sResult = "\n* " + oErr['nStart'] + ":" + oErr['nEnd']
                           + " # " + oErr['sLineId'] + " # " + oErr['sRuleId'] + ":\n";
             sResult += "  " + oErr["sMessage"];
             if (oErr["aSuggestions"].length > 0) {
                 sResult += "\n  > Suggestions : " + oErr["aSuggestions"].join(" | ");
             }
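Note on getParagraph: the new sSepParagraph parameter makes the separator configurable, so the cursor must advance past the whole separator (hence iEnd + sSepParagraph.length). A minimal usage sketch; the sample strings are illustrative:

// Minimal sketch of the reworked generator; `text` is the module object
// defined above, inputs are made up for illustration.
for (let sParagraph of text.getParagraph("one\ntwo\nthree")) {
    console.log(sParagraph);    // "one", then "two", then "three"
}
// A multi-character separator only works because iStart advances by
// sSepParagraph.length after each match.
for (let sParagraph of text.getParagraph("a||b||c", "||")) {
    console.log(sParagraph);    // "a", then "b", then "c"
}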
Index: gc_lang/fr/build_data.py
==================================================================
--- gc_lang/fr/build_data.py
+++ gc_lang/fr/build_data.py
@@ -10,10 +10,11 @@
 import grammalecte.ibdawg as ibdawg
 from grammalecte.echo import echo
 from grammalecte.str_transform import defineSuffixCode
 import grammalecte.fr.conj as conj
+import grammalecte.tokenizer as tkz
 
 
 class cd:
     """Context manager for changing the current working directory"""
     def __init__ (self, newPath):
@@ -256,11 +257,11 @@
 
 
 def makePhonetTable (sp, bJS=False):
     print("> Correspondances phonétiques ", end="")
     print("(Python et JavaScript)" if bJS else "(Python seulement)")
-    
+
     try:
         oDict = ibdawg.IBDAWG("French.bdic")
     except:
         traceback.print_exc()
         return
@@ -310,25 +311,28 @@
 
 def makeLocutions (sp, bJS=False):
     "compile list of locutions in JSON"
     print("> Locutions ", end="")
     print("(Python et JavaScript)" if bJS else "(Python seulement)")
     with open(sp+"/data/locutions.txt", 'r', encoding='utf-8') as hSrc:
-        dLocutions = {}
+        dLocGraph = {}
+        oTokenizer = tkz.Tokenizer("fr")
         for sLine in hSrc.readlines():
             if not sLine.startswith("#") and sLine.strip():
-                lElem = sLine.strip().split()
-                dCur = dLocutions
-                for sWord in lElem:
+                dCur = dLocGraph
+                sLoc, sTag = sLine.strip().split("\t")
+                for oToken in oTokenizer.genTokens(sLoc.strip()):
+                    sWord = oToken["sValue"]
                     if sWord not in dCur:
                         dCur[sWord] = {}
                     dCur = dCur[sWord]
+                dCur[":"] = sTag
     sCode = "# generated data (do not edit)\n\n" + \
-            "dLocutions = " + str(dLocutions) + "\n"
+            "dLocutions = " + str(dLocGraph) + "\n"
     open(sp+"/modules/locutions_data.py", "w", encoding="utf-8", newline="\n").write(sCode)
     if bJS:
-        open(sp+"/modules-js/locutions_data.json", "w", encoding="utf-8", newline="\n").write(json.dumps(dLocutions, ensure_ascii=False))
+        open(sp+"/modules-js/locutions_data.json", "w", encoding="utf-8", newline="\n").write(json.dumps(dLocGraph, ensure_ascii=False))
 
 
 def before (spLaunch, dVars, bJS=False):
     print("========== Build Hunspell dictionaries ==========")
     makeDictionaries(spLaunch, dVars['oxt_version'])

Index: gc_lang/fr/data/locutions.txt
==================================================================
--- gc_lang/fr/data/locutions.txt
+++ gc_lang/fr/data/locutions.txt
@@ -1,8 +1,14 @@
-à califourchon
-à cœur joie
-à cœur ouvert
-à corps perdu
-à perte de vue
-à visage découvert
-par ailleurs
-par acquit de conscience
+à califourchon	:Ŵ
+à contrecœur	:Ŵ
+à cœur joie	:Ŵ
+à cœur ouvert	:Ŵ
+à corps perdu	:Ŵ
+à bâtons rompus	:Ŵ
+à perte de vue	:Ŵ
+à visage découvert	:Ŵ
+à vue d’œil	:Ŵ
+à l’aveuglette	:Ŵ
+ad hominem	:Ŵ
+en tout et pour tout	:Ŵ
+par ailleurs	:Ŵ
+par acquit de conscience	:Ŵ
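For clarity, a sketch of what makeLocutions now emits: each tab-separated line becomes a path of tokens in a nested map (see locutions_data.json below), and the reserved key ":" carries the tag at the node where a locution ends. The lookup helper is hypothetical, written here only to show how the structure is meant to be walked:

// Shape of the compiled graph for two of the lines above; the reserved
// ":" key marks the end of a locution and stores its tag.
const oLocGraph = {
    "à": {
        "cœur": {
            "joie":   { ":": ":Ŵ" },
            "ouvert": { ":": ":Ŵ" }
        }
    }
};

// Hypothetical helper (not part of the patch): walk token values through
// the graph and return the tag of the longest locution matched, or null.
function lookupLocution (oGraph, lWords) {
    let oNode = oGraph;
    let sTag = null;
    for (let sWord of lWords) {
        oNode = oNode[sWord.toLowerCase()];
        if (!oNode) {
            break;
        }
        if (oNode[":"]) {
            sTag = oNode[":"];
        }
    }
    return sTag;
}

console.log(lookupLocution(oLocGraph, ["à", "cœur", "ouvert"]));  // ":Ŵ"
console.log(lookupLocution(oLocGraph, ["à", "corps"]));           // null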
sing."], ['les', " COD, plur."], - + ['moi', " COI (à moi), sing."], ['toi', " COI (à toi), sing."], ['lui', " COI (à lui ou à elle), sing."], ['nous2', " COI (à nous), plur."], ['vous2', " COI (à vous), plur."], @@ -159,11 +162,11 @@ ["m'en", " (me) pronom personnel objet + (en) pronom adverbial"], ["t'en", " (te) pronom personnel objet + (en) pronom adverbial"], ["s'en", " (se) pronom personnel objet + (en) pronom adverbial"] ]); -const _dSeparator = new Map ([ +const _dSeparator = new Map([ ['.', "point"], ['·', "point médian"], ['…', "points de suspension"], [':', "deux-points"], [';', "point-virgule"], @@ -194,68 +197,104 @@ ]); class Lexicographe { - constructor (oDict) { + constructor (oDict, oTokenizer, oLocGraph) { this.oDict = oDict; - this._zElidedPrefix = new RegExp ("^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)", "i"); - this._zCompoundWord = new RegExp ("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$", "i"); - this._zTag = new RegExp ("[:;/][a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ*][^:;/]*", "g"); + this.oTokenizer = oTokenizer; + this.oLocGraph = JSON.parse(oLocGraph); + + this._zElidedPrefix = new RegExp("^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)", "i"); + this._zCompoundWord = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$", "i"); + this._zTag = new RegExp("[:;/][a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ*Ṽ][^:;/]*", "g"); + } getInfoForToken (oToken) { // Token: .sType, .sValue, .nStart, .nEnd - // return a list [type, token_string, values] + // return a object {sType, sValue, aLabel} let m = null; try { switch (oToken.sType) { case 'SEPARATOR': - return { sType: oToken.sType, sValue: oToken.sValue, aLabel: [_dSeparator.gl_get(oToken.sValue, "caractère indéterminé")] }; + return { + sType: oToken.sType, + sValue: oToken.sValue, + aLabel: [_dSeparator.gl_get(oToken.sValue, "caractère indéterminé")] + }; break; case 'NUM': - return { sType: oToken.sType, sValue: oToken.sValue, aLabel: ["nombre"] }; + return { + sType: oToken.sType, + sValue: oToken.sValue, + aLabel: ["nombre"] + }; break; case 'LINK': - return { sType: oToken.sType, sValue: oToken.sValue.slice(0,40)+"…", aLabel: ["hyperlien"] }; + return { + sType: oToken.sType, + sValue: oToken.sValue.slice(0, 40) + "…", + aLabel: ["hyperlien"] + }; break; case 'ELPFX': let sTemp = oToken.sValue.replace("’", "").replace("'", "").replace("`", "").toLowerCase(); - return { sType: oToken.sType, sValue: oToken.sValue, aLabel: [_dPFX.gl_get(sTemp, "préfixe élidé inconnu")] }; + return { + sType: oToken.sType, + sValue: oToken.sValue, + aLabel: [_dPFX.gl_get(sTemp, "préfixe élidé inconnu")] + }; break; case 'FOLDER': - return { sType: oToken.sType, sValue: oToken.sValue.slice(0,40)+"…", aLabel: ["dossier"] }; + return { + sType: oToken.sType, + sValue: oToken.sValue.slice(0, 40) + "…", + aLabel: ["dossier"] + }; break; - case 'WORD': + case 'WORD': if (oToken.sValue.gl_count("-") > 4) { - return { sType: "COMPLEX", sValue: oToken.sValue, aLabel: ["élément complexe indéterminé"] }; - } - else if (this.oDict.isValidToken(oToken.sValue)) { + return { + sType: "COMPLEX", + sValue: oToken.sValue, + aLabel: ["élément complexe indéterminé"] + }; + } else if (this.oDict.isValidToken(oToken.sValue)) { let lMorph = this.oDict.getMorph(oToken.sValue); let aElem = []; - for (let s of lMorph){ - if 
(s.includes(":")) aElem.push( this._formatTags(s) ); + for (let s of lMorph) { + if (s.includes(":")) aElem.push(this._formatTags(s)); } - return { sType: oToken.sType, sValue: oToken.sValue, aLabel: aElem}; - } - else if (m = this._zCompoundWord.exec(oToken.sValue)) { + return { + sType: oToken.sType, + sValue: oToken.sValue, + aLabel: aElem + }; + } else if (m = this._zCompoundWord.exec(oToken.sValue)) { // mots composés let lMorph = this.oDict.getMorph(m[1]); let aElem = []; - for (let s of lMorph){ - if (s.includes(":")) aElem.push( this._formatTags(s) ); + for (let s of lMorph) { + if (s.includes(":")) aElem.push(this._formatTags(s)); } aElem.push("-" + m[2] + ": " + this._formatSuffix(m[2].toLowerCase())); - return { sType: oToken.sType, sValue: oToken.sValue, aLabel: aElem }; - } - else { - return { sType: "UNKNOWN", sValue: oToken.sValue, aLabel: ["inconnu du dictionnaire"] }; + return { + sType: oToken.sType, + sValue: oToken.sValue, + aLabel: aElem + }; + } else { + return { + sType: "UNKNOWN", + sValue: oToken.sValue, + aLabel: ["inconnu du dictionnaire"] + }; } break; } - } - catch (e) { + } catch (e) { helpers.logerror(e); } return null; } @@ -289,13 +328,109 @@ } if (s.endsWith("ous")) { s += '2'; } let nPos = s.indexOf("-"); - return _dAD.get(s.slice(0, nPos)) + " +" + _dAD.get(s.slice(nPos+1)); + return _dAD.get(s.slice(0, nPos)) + " +" + _dAD.get(s.slice(nPos + 1)); + } + + getListOfTokens (sText, bInfo=true) { + let aElem = []; + if (sText !== "") { + for (let oToken of this.oTokenizer.genTokens(sText)) { + if (bInfo) { + let aRes = this.getInfoForToken(oToken); + if (aRes) { + aElem.push(aRes); + } + } else if (oToken.sType !== "SPACE") { + aElem.push(oToken); + } + } + } + return aElem; + } + + generateInfoForTokenList (lToken) { + let aElem = []; + for (let oToken of lToken) { + let aRes = this.getInfoForToken(oToken); + if (aRes) { + aElem.push(aRes); + } + } + return aElem; + } + + getListOfTokensReduc (sText, bInfo=true) { + let aTokenList = this.getListOfTokens(sText.replace("'", "’").trim(), false); + let iKey = 0; + let aElem = []; + do { + let oToken = aTokenList[iKey]; + let sMorphLoc = ''; + let aTokenTempList = [oToken]; + if (oToken.sType == "WORD" || oToken.sType == "ELPFX"){ + let iKeyTree = iKey + 1; + let oLocNode = this.oLocGraph[oToken.sValue.toLowerCase()]; + while (oLocNode) { + let oTokenNext = aTokenList[iKeyTree]; + iKeyTree++; + if (oTokenNext) { + oLocNode = oLocNode[oTokenNext.sValue.toLowerCase()]; + } + if (oLocNode && iKeyTree <= aTokenList.length) { + sMorphLoc = oLocNode[":"]; + aTokenTempList.push(oTokenNext); + } else { + break; + } + } + } + + if (sMorphLoc) { + let sValue = ''; + for (let oTokenWord of aTokenTempList) { + sValue += oTokenWord.sValue+' '; + } + let oTokenLocution = { + 'nStart': aTokenTempList[0].nStart, + 'nEnd': aTokenTempList[aTokenTempList.length-1].nEnd, + 'sType': "LOC", + 'sValue': sValue.replace('’ ','’').trim(), + 'aSubToken': aTokenTempList + }; + if (bInfo) { + let aFormatedTag = []; + for (let sTagLoc of sMorphLoc.split('|') ){ + aFormatedTag.push( this._formatTags(sTagLoc).replace(/( \(él.\))/g,'') ); + } + aElem.push({ + sType: oTokenLocution.sType, + sValue: oTokenLocution.sValue, + aLabel: aFormatedTag, + aSubElem: this.generateInfoForTokenList(aTokenTempList) + }); + } else { + aElem.push(oTokenLocution); + } + iKey = iKey + aTokenTempList.length; + } else { + if (bInfo) { + let aRes = this.getInfoForToken(oToken); + if (aRes) { + aElem.push(aRes); + } + } else { + aElem.push(oToken); + } + iKey++; + } 
+        } while (iKey < aTokenList.length);
+        return aElem;
     }
 }
 
 
 if (typeof(exports) !== 'undefined') {
     exports.Lexicographe = Lexicographe;
 }

Index: gc_lang/fr/modules-js/locutions_data.json
==================================================================
--- gc_lang/fr/modules-js/locutions_data.json
+++ gc_lang/fr/modules-js/locutions_data.json
@@ -1,1 +1,1 @@
-{"à": {"califourchon": {}, "cœur": {"joie": {}, "ouvert": {}}, "corps": {"perdu": {}}, "perte": {"de": {"vue": {}}}, "visage": {"découvert": {}}}, "par": {"ailleurs": {}, "acquit": {"de": {"conscience": {}}}}}
+{"à": {"califourchon": {":": ":Ŵ"}, "contrecœur": {":": ":Ŵ"}, "cœur": {"joie": {":": ":Ŵ"}, "ouvert": {":": ":Ŵ"}}, "corps": {"perdu": {":": ":Ŵ"}}, "bâtons": {"rompus": {":": ":Ŵ"}}, "perte": {"de": {"vue": {":": ":Ŵ"}}}, "visage": {"découvert": {":": ":Ŵ"}}, "vue": {"d’": {"œil": {":": ":Ŵ"}}}, "l’": {"aveuglette": {":": ":Ŵ"}}}, "ad": {"hominem": {":": ":Ŵ"}}, "en": {"tout": {"et": {"pour": {"tout": {":": ":Ŵ"}}}}}, "par": {"ailleurs": {":": ":Ŵ"}, "acquit": {"de": {"conscience": {":": ":Ŵ"}}}}}

Index: gc_lang/fr/modules/locutions_data.py
==================================================================
--- gc_lang/fr/modules/locutions_data.py
+++ gc_lang/fr/modules/locutions_data.py
@@ -1,3 +1,3 @@
 # generated data (do not edit)
 
-dLocutions = {'à': {'califourchon': {}, 'cœur': {'joie': {}, 'ouvert': {}}, 'corps': {'perdu': {}}, 'perte': {'de': {'vue': {}}}, 'visage': {'découvert': {}}}, 'par': {'ailleurs': {}, 'acquit': {'de': {'conscience': {}}}}}
+dLocutions = {'à': {'califourchon': {':': ':Ŵ'}, 'contrecœur': {':': ':Ŵ'}, 'cœur': {'joie': {':': ':Ŵ'}, 'ouvert': {':': ':Ŵ'}}, 'corps': {'perdu': {':': ':Ŵ'}}, 'bâtons': {'rompus': {':': ':Ŵ'}}, 'perte': {'de': {'vue': {':': ':Ŵ'}}}, 'visage': {'découvert': {':': ':Ŵ'}}, 'vue': {'d’': {'œil': {':': ':Ŵ'}}}, 'l’': {'aveuglette': {':': ':Ŵ'}}}, 'ad': {'hominem': {':': ':Ŵ'}}, 'en': {'tout': {'et': {'pour': {'tout': {':': ':Ŵ'}}}}}, 'par': {'ailleurs': {':': ':Ŵ'}, 'acquit': {'de': {'conscience': {':': ':Ŵ'}}}}}

Index: gc_lang/fr/webext/content_scripts/panel_lxg.css
==================================================================
--- gc_lang/fr/webext/content_scripts/panel_lxg.css
+++ gc_lang/fr/webext/content_scripts/panel_lxg.css
@@ -7,12 +7,12 @@
 }
 
 .grammalecte_lxg_list_of_tokens {
     margin: 5px 0 10px 0;
     padding: 10px;
-    background-color: hsla(0, 0%, 96%, 1);
-    border-radius: 2px;
+    background-color: hsla(0, 0%, 95%, 1);
+    border-radius: 5px;
 }
 .grammalecte_lxg_list_num {
     float: right;
     margin: -12px 0 5px 10px;
@@ -33,10 +33,21 @@
     font-size: 20px;
 }
 
 .grammalecte_lxg_token_block {
     margin: 4px 0;
+}
+.grammalecte_lxg_token_subblock {
+    margin: 2px 0 2px 20px;
+    padding: 5px;
+    border-left: 4px solid hsl(150, 30%, 70%);
+    background-color: hsl(210, 10%, 90%);
+    border-radius: 2px;
+}
+.grammalecte_lxg_token_descr {
+    margin: 1px;
+    padding: 1px;
 }
 .grammalecte_lxg_token {
     display: inline-block;
     background-color: hsl(150, 0%, 50%);
     color: hsl(0, 0%, 96%);
@@ -49,15 +60,18 @@
     display: inline-block;
     padding: 2px 5px;
     color: hsl(0, 0%, 50%);
 }
 .grammalecte_lxg_morph_list {
-    padding: 2px 0 10px 20px;
+    padding: 2px 0 2px 20px;
 }
 .grammalecte_lxg_morph_elem {
     color: hsl(0, 0%, 0%);
 }
+.grammalecte_lxg_token_LOC {
+    background-color: hsla(150, 50%, 30%, 1);
+}
 .grammalecte_lxg_token_WORD {
     background-color: hsla(150, 50%, 50%, 1);
 }
 .grammalecte_lxg_token_ELPFX {
     background-color: hsla(150, 30%, 50%, 1);
Index: gc_lang/fr/webext/content_scripts/panel_lxg.js
==================================================================
--- gc_lang/fr/webext/content_scripts/panel_lxg.js
+++ gc_lang/fr/webext/content_scripts/panel_lxg.js
@@ -27,44 +27,62 @@
     addMessage (sMessage) {
         let xNode = oGrammalecte.createNode("div", {className: "grammalecte_panel_message", textContent: sMessage});
         this._xContentNode.appendChild(xNode);
     }
 
-    addListOfTokens (lTokens) {
+    addListOfTokens (lToken) {
        try {
-            if (lTokens) {
+            if (lToken) {
                 this._nCount += 1;
-                let xNodeDiv = oGrammalecte.createNode("div", {className: "grammalecte_lxg_list_of_tokens"});
-                xNodeDiv.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_list_num", textContent: this._nCount}));
-                for (let oToken of lTokens) {
-                    xNodeDiv.appendChild(this._createTokenNode(oToken));
+                let xTokenList = oGrammalecte.createNode("div", {className: "grammalecte_lxg_list_of_tokens"});
+                xTokenList.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_list_num", textContent: this._nCount}));
+                for (let oToken of lToken) {
+                    xTokenList.appendChild(this._createTokenBlock(oToken));
+                }
+                this._xContentNode.appendChild(xTokenList);
+            }
+        }
+        catch (e) {
+            showError(e);
+        }
+    }
+
+    _createTokenBlock (oToken) {
+        let xTokenBlock = oGrammalecte.createNode("div", {className: "grammalecte_lxg_token_block"});
+        xTokenBlock.appendChild(this._createTokenDescr(oToken));
+        if (oToken.aSubElem) {
+            let xSubBlock = oGrammalecte.createNode("div", {className: "grammalecte_lxg_token_subblock"});
+            for (let oSubElem of oToken.aSubElem) {
+                xSubBlock.appendChild(this._createTokenDescr(oSubElem));
+            }
+            xTokenBlock.appendChild(xSubBlock);
+        }
+        return xTokenBlock;
+    }
+
+    _createTokenDescr (oToken) {
+        try {
+            let xTokenDescr = oGrammalecte.createNode("div", {className: "grammalecte_lxg_token_descr"});
+            xTokenDescr.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_token grammalecte_lxg_token_" + oToken.sType, textContent: oToken.sValue}));
+            xTokenDescr.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_token_colon", textContent: ":"}));
+            if (oToken.aLabel.length === 1) {
+                xTokenDescr.appendChild(document.createTextNode(oToken.aLabel[0]));
+            } else {
+                let xMorphList = oGrammalecte.createNode("div", {className: "grammalecte_lxg_morph_list"});
+                for (let sLabel of oToken.aLabel) {
+                    xMorphList.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_morph_elem", textContent: "• " + sLabel}));
                 }
-                this._xContentNode.appendChild(xNodeDiv);
+                xTokenDescr.appendChild(xMorphList);
             }
+            return xTokenDescr;
         }
         catch (e) {
             showError(e);
         }
     }
 
-    _createTokenNode (oToken) {
-        let xTokenNode = oGrammalecte.createNode("div", {className: "grammalecte_lxg_token_block"});
-        xTokenNode.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_token grammalecte_lxg_token_" + oToken.sType, textContent: oToken.sValue}));
-        xTokenNode.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_token_colon", textContent: ":"}));
-        if (oToken.aLabel.length === 1) {
-            xTokenNode.appendChild(document.createTextNode(oToken.aLabel[0]));
-        } else {
-            let xTokenList = oGrammalecte.createNode("div", {className: "grammalecte_lxg_morph_list"});
-            for (let sLabel of oToken.aLabel) {
-                xTokenList.appendChild(oGrammalecte.createNode("div", {className: "grammalecte_lxg_morph_elem", textContent: "• " + sLabel}));
-            }
-            xTokenNode.appendChild(xTokenList);
-        }
-        return xTokenNode;
-    }
-
     setHidden (sClass, bHidden) {
         for (let xNode of document.getElementsByClassName(sClass)) {
             xNode.hidden = bHidden;
         }
     }
 }
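To illustrate the rendering path, a sketch of feeding the panel one reduced list containing a LOC element; the panel instance name and the label strings are assumed for the example:

// Hypothetical call; oPanel stands for the lexicographer panel instance
// and the labels are made up. A LOC element renders as one token block,
// with its aSubElem entries indented in a grammalecte_lxg_token_subblock.
oPanel.addListOfTokens([
    {
        sType: "LOC",
        sValue: "à cœur ouvert",
        aLabel: ["locution adverbiale"],
        aSubElem: [
            { sType: "WORD", sValue: "à",      aLabel: ["préposition"] },
            { sType: "WORD", sValue: "cœur",   aLabel: ["nom, masculin singulier"] },
            { sType: "WORD", sValue: "ouvert", aLabel: ["adjectif, masculin singulier"] }
        ]
    }
]);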
Index: gc_lang/fr/webext/gce_worker.js
==================================================================
--- gc_lang/fr/webext/gce_worker.js
+++ gc_lang/fr/webext/gce_worker.js
@@ -138,10 +138,11 @@
 
 let oDict = null;
 let oTokenizer = null;
 let oLxg = null;
 let oTest = null;
+let oLocution = null;
 
 
 /*
     Technical note:
     This worker doesn’t work as a PromiseWorker (which returns a promise), so when we send request
@@ -160,15 +161,18 @@
         mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
         //console.log("[Worker] Modules have been initialized…");
         gc_engine.load(sContext, sExtensionPath+"grammalecte/_dictionaries");
         oDict = gc_engine.getDictionary();
         oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
-        oLxg = new Lexicographe(oDict);
+        oTokenizer = new Tokenizer("fr");
+
+        oLocution = helpers.loadFile(sExtensionPath + "/grammalecte/fr/locutions_data.json");
+
+        oLxg = new Lexicographe(oDict, oTokenizer, oLocution);
         if (dOptions !== null) {
             gc_engine.setOptions(dOptions);
         }
-        oTokenizer = new Tokenizer("fr");
         //tests();
         bInitDone = true;
     } else {
         console.log("[Worker] Already initialized…")
     }
@@ -296,23 +300,15 @@
 
 function getListOfTokens (sText, dInfo={}) {
     try {
         for (let sParagraph of text.getParagraph(sText)) {
             if (sParagraph.trim() !== "") {
-                let aElem = [];
-                let aRes = null;
-                for (let oToken of oTokenizer.genTokens(sParagraph)) {
-                    aRes = oLxg.getInfoForToken(oToken);
-                    if (aRes) {
-                        aElem.push(aRes);
-                    }
-                }
-                postMessage(createResponse("getListOfTokens", aElem, dInfo, false));
+                postMessage(createResponse("getListOfTokens", oLxg.getListOfTokensReduc(sParagraph, true), dInfo, false));
             }
         }
         postMessage(createResponse("getListOfTokens", null, dInfo, true));
     }
     catch (e) {
         helpers.logerror(e);
         postMessage(createResponse("getListOfTokens", createErrorResult(e, "no tokens"), dInfo, true, true));
     }
 }
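For reference, the per-paragraph flow that getListOfTokens now follows, reduced to its core; the input text is illustrative, and text, oLxg, createResponse are the objects defined above:

// Minimal sketch of the worker's new loop: each non-empty paragraph is
// reduced by the lexicographe before being posted back to the caller.
let dInfo = {};
let sText = "Il agit à bâtons rompus.\n\nPar ailleurs, tout va bien.";
for (let sParagraph of text.getParagraph(sText)) {
    if (sParagraph.trim() !== "") {
        let aElem = oLxg.getListOfTokensReduc(sParagraph, true);
        // each entry: {sType, sValue, aLabel} or, for folded locutions,
        // {sType: "LOC", sValue, aLabel, aSubElem}
        postMessage(createResponse("getListOfTokens", aElem, dInfo, false));
    }
}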