Index: gc_lang/fr/perf_memo.txt
==================================================================
--- gc_lang/fr/perf_memo.txt
+++ gc_lang/fr/perf_memo.txt
@@ -26,5 +26,7 @@
 0.6.2   2018.02.19 19:06   5.51302   1.29359    0.874157   0.260415   0.271596   0.290641   0.684754   0.376905   0.0815201  0.00919633  (spelling normalization)
 1.0     2018.11.23 10:59   2.88577   0.702486   0.485648   0.139897   0.14079    0.148125   0.348751   0.201061   0.0360297  0.0043535   (x2, with new GC engine)
 1.1     2019.05.16 09:42   1.50743   0.360923   0.261113   0.0749272  0.0763827  0.0771537  0.180504   0.102942   0.0182762  0.0021925   (×2, but new processor: AMD Ryzen 7 2700X)
 1.2.1   2019.08.06 20:57   1.42886   0.358425   0.247356   0.0704405  0.0754886  0.0765604  0.177197   0.0988517  0.0188103  0.0020243
 1.6.0   2020.01.03 20:22   1.38847   0.346214   0.240242   0.0709539  0.0737499  0.0748733  0.176477   0.0969171  0.0187857  0.0025143   (new dictionary with masculine lemmas)
+1.9.0   2020.04.20 19:57   1.51183   0.369546   0.25681    0.0734314  0.0764396  0.0785668  0.183922   0.103674   0.0185812  0.002099    (NFC normalization)
+

Index: graphspell-js/tokenizer.js
==================================================================
--- graphspell-js/tokenizer.js
+++ graphspell-js/tokenizer.js
@@ -42,11 +42,11 @@
             [/^(?:l|d|n|m|t|s|j|c|ç|lorsqu|puisqu|jusqu|quoiqu|qu|presqu|quelqu)['’´‘′`ʼ]/i, 'WORD_ELIDED'],
             [/^\d\d?[h:]\d\d(?:[m:]\d\ds?|)\b/, 'HOUR'],
             [/^\d+(?:ers?\b|res?\b|è[rm]es?\b|i[èe][mr]es?\b|de?s?\b|nde?s?\b|ès?\b|es?\b|ᵉʳˢ?|ʳᵉˢ?|ᵈᵉ?ˢ?|ⁿᵈᵉ?ˢ?|ᵉˢ?)/, 'WORD_ORDINAL'],
             [/^\d+(?:[.,]\d+|)/, 'NUM'],
             [/^[&%‰€$+±=*/<>⩾⩽#|×¥£§¢¬÷@-]/, 'SIGN'],
-            [/^[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯﬀ-ﬆᴀ-ᶿᵉʳˢⁿᵈ_]+(?:[’'`-][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯﬀ-ﬆᴀ-ᶿᵉʳˢⁿᵈ_]+)*/, 'WORD']
+            [/^[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯﬀ-ﬆᴀ-ᶿ\u0300-\u036fᵉʳˢⁿᵈ_]+(?:[’'`-][a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯﬀ-ﬆᴀ-ᶿ\u0300-\u036fᵉʳˢⁿᵈ_]+)*/, 'WORD']
         ]
 };
 
 
 class Tokenizer {
@@ -72,11 +72,11 @@
         for (let [zRegex, sType] of this.aRules) {
             if (sType !== "SPACE" || bWithSpaces) {
                 try {
                     if ((m = zRegex.exec(sText)) !== null) {
                         iToken += 1;
-                        yield { "i": iToken, "sType": sType, "sValue": m[0], "nStart": iNext, "nEnd": iNext + m[0].length };
+                        yield { "i": iToken, "sType": sType, "sValue": m[0], "nStart": iNext, "nEnd": iNext + m[0].length };  // m[0].normalize("NFC") not useful at the moment
                         iCut = m[0].length;
                         break;
                     }
                 }
                 catch (e) {

Index: graphspell/tokenizer.py
==================================================================
--- graphspell/tokenizer.py
+++ graphspell/tokenizer.py
@@ -3,10 +3,12 @@
 using regular expressions
 """
 
 import re
+from unicodedata import normalize
+
 
 _PATTERNS = {
     "default":
         (
             r'(?P<FOLDERUNIX>/(?:bin|boot|dev|etc|home|lib|mnt|opt|root|sbin|tmp|usr|var|Bureau|Documents|Images|Musique|Public|Téléchargements|Vidéos)(?:/[\w.()-]+)*)',
             r'(?P<FOLDERWIN>[a-zA-Z]:\\(?:Program Files(?: [(]x86[)]|)|[\w.()]+)(?:\\[\w.()-]+)*)',
@@ -34,11 +36,11 @@
             r"(?P<WORD_ELIDED>(?:l|d|n|m|t|s|j|c|ç|lorsqu|puisqu|jusqu|quoiqu|qu|presqu|quelqu)['’´‘′`ʼ])",
             r'(?P<WORD_ORDINAL>\d+(?:ers?|res?|è[rm]es?|i[èe][mr]es?|de?s?|nde?s?|ès?|es?|ᵉʳˢ?|ʳᵉˢ?|ᵈᵉ?ˢ?|ⁿᵈᵉ?ˢ?|ᵉˢ?)\b)',
             r'(?P<HOUR>\d\d?[h:]\d\d(?:[m:]\d\ds?|)\b)',
             r'(?P<NUM>\d+(?:[.,]\d+|))',
             r'(?P<SIGN>[&%‰€$+±=*/<>⩾⩽#|×¥£¢§¬÷@-])',
-            r"(?P<WORD>\w+(?:[’'`-]\w+)*)"
+            r"(?P<WORD>[\w\u0300-\u036f]+(?:[’'`-][\w\u0300-\u036f]+)*)"
         )
 }
 
 
 class Tokenizer:
@@ -54,13 +56,13 @@
         "generator: tokenize <sText>"
         i = 0
         if bStartEndToken:
             yield { "i": 0, "sType": "INFO", "sValue": "<start>", "nStart": 0, "nEnd": 0, "lMorph": ["<start>"] }
         for i, m in enumerate(self.zToken.finditer(sText), 1):
-            yield { "i": i, "sType": m.lastgroup, "sValue": m.group(), "nStart": m.start(), "nEnd": m.end() }
+            yield { "i": i, "sType": m.lastgroup, "sValue": normalize("NFC", m.group()), "nStart": m.start(), "nEnd": m.end() }
         if bStartEndToken:
             iEnd = len(sText)
             yield { "i": i+1, "sType": "INFO", "sValue": "<end>", "nStart": iEnd, "nEnd": iEnd, "lMorph": ["<end>"] }
 
     def getTokenTypes (self):
         "returns list of token types as tuple (token name, regex)"
         return [ sRegex[4:-1].split(">") for sRegex in _PATTERNS[self.sLang] ]