Changes In Branch gcerw Through [759321730c] Excluding Merge-Ins
This is equivalent to a diff from 787d6a7582 to 759321730c
|
2020-04-05
| ||
| 09:54 | [fr] ajustements check-in: 52430380b1 user: olr tags: trunk, fr | |
| 08:39 | [core][cli][graphsell][lo][py] move lexicographer from gc engine to graphspell check-in: ba3c939f60 user: olr tags: cli, core, lo, graphspell, gcerw | |
|
2020-04-04
| ||
| 15:54 | [core][fr][py] gc_engine.py as primary module check-in: 759321730c user: olr tags: fr, core, gcerw | |
|
2020-04-03
| ||
| 23:39 | [core][py] add gc_engine_func.py check-in: d12fb1528a user: olr tags: core, gcerw | |
| 23:38 | [core][py] gc engine: code rewriting check-in: 16a21a38cd user: olr tags: core, gcerw | |
| 17:24 | [fr] bug: mauvaise commande check-in: 787d6a7582 user: olr tags: trunk, fr | |
| 13:33 | [fr] ajustements check-in: 502d7277a2 user: olr tags: trunk, fr | |
Modified gc_core/py/__init__.py from [49f46a05ff] to [3c70db889b].
1 2 3 4 | """ Grammar checker """ | | | 1 2 3 4 5 |
"""
Grammar checker
"""
from .${lang}.gc_engine import *
|
Deleted gc_core/py/grammar_checker.py version [50b054f72f].
|
| < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < |
Modified gc_core/py/lang_core/gc_engine.py from [c796a47a03] to [95762aa8e3].
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 |
"""
Grammalecte
Grammar checker engine
"""
import re
import traceback
#import unicodedata
from itertools import chain
from ..graphspell.spellchecker import SpellChecker
from ..graphspell.echo import echo
from .. import text
from . import gc_options
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_bWriterError = True
except ImportError:
_bWriterError = False
| > > > > | | < | | < < < > > > > < < | < < | > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
"""
Grammalecte
Grammar checker engine
"""
import re
import traceback
import json
import importlib
#import unicodedata
from itertools import chain
from ..graphspell.spellchecker import SpellChecker
from ..graphspell.echo import echo
from .. import text
from . import gc_engine_func as gce_func
from . import gc_options
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_bWriterError = True
except ImportError:
_bWriterError = False
#__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
# "load", "parse", "getSpellChecker", "getTextFormatter", "getLexicographer" \
# "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules", "setWriterUnderliningStyle" ]
__version__ = "${version}"
lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"
# Modules
_rules = None # module gc_rules
_rules_graph = None # module gc_rules_graph
# Tools
_oSpellChecker = None
_oTokenizer = None
_oLexicographer = None
# Data
_aIgnoredRules = set()
# Writer underlining style
_dOptionsColors = None
_bMulticolor = True
_nUnderliningStyle = 0
#### Initialization
def load (sContext="Python", sColorType="aRGB"):
"initialization of the grammar checker"
global _oSpellChecker
global _dOptionsColors
global _oTokenizer
try:
_oSpellChecker = SpellChecker("${lang}", "${dic_main_filename_py}", "${dic_community_filename_py}", "${dic_personal_filename_py}")
_oSpellChecker.activateStorage()
_oTokenizer = _oSpellChecker.getTokenizer()
gce_func.load(sContext, _oSpellChecker)
gc_options.load(sContext)
_dOptionsColors = gc_options.getOptionsColors(sContext, sColorType)
except:
traceback.print_exc()
#### Tools
def getSpellChecker ():
"return the spellchecker object"
return _oSpellChecker
def getLexicographer ():
"load and return the lexicographer"
global _oLexicographer
if _oLexicographer is None:
lxg = importlib.import_module(".lexicographe", "grammalecte.${lang}")
_oLexicographer = lxg.Lexicographe(_oSpellChecker)
return _oLexicographer
#### Rules
def _getRules (bParagraph):
try:
if not bParagraph:
return _rules.lSentenceRules
|
| ︙ | ︙ | |||
127 128 129 130 131 132 133 |
def reactivateRule (sRuleId):
"(re)activate rule <sRuleId>"
_aIgnoredRules.discard(sRuleId)
def listRules (sFilter=None):
| | | 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 |
def reactivateRule (sRuleId):
"(re)activate rule <sRuleId>"
_aIgnoredRules.discard(sRuleId)
def listRules (sFilter=None):
"generator: returns tuple (sRuleType, sOption, sLineId, sRuleId)"
if sFilter:
try:
zFilter = re.compile(sFilter)
except re.error:
echo("# Error. List rules: wrong regex.")
sFilter = None
# regex rules
|
| ︙ | ︙ | |||
153 154 155 156 157 158 159 |
def displayRules (sFilter=None):
"display the name of rules, with the filter <sFilter>"
echo("List of rules. Filter: << " + str(sFilter) + " >>")
for sOption, sLineId, sRuleId, sType in listRules(sFilter):
echo("{:<8} {:<10} {:<10} {}".format(sOption, sLineId, sRuleId, sType))
| < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 166 167 168 169 170 171 172 173 174 175 176 177 178 179 |
def displayRules (sFilter=None):
"display the name of rules, with the filter <sFilter>"
echo("List of rules. Filter: << " + str(sFilter) + " >>")
for sOption, sLineId, sRuleId, sType in listRules(sFilter):
echo("{:<8} {:<10} {:<10} {}".format(sOption, sLineId, sRuleId, sType))
def setWriterUnderliningStyle (sStyle="BOLDWAVE", bMulticolor=True):
"set underlining style for Writer (WAVE, BOLDWAVE, BOLD)"
global _nUnderliningStyle
global _bMulticolor
# https://api.libreoffice.org/docs/idl/ref/FontUnderline_8idl.html
# WAVE: 10, BOLD: 12, BOLDWAVE: 18 DASH: 5
|
| ︙ | ︙ | |||
217 218 219 220 221 222 223 224 225 226 227 228 229 230 |
_nUnderliningStyle = 5
else:
_nUnderliningStyle = 0
_bMulticolor = bMulticolor
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
"init point to analyse <sText> and returns an iterable of errors or (with option <bFullInfo>) paragraphs errors and sentences with tokens and errors"
oText = TextParser(sText)
return oText.parse(sCountry, bDebug, dOptions, bContext, bFullInfo)
| > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 |
_nUnderliningStyle = 5
else:
_nUnderliningStyle = 0
_bMulticolor = bMulticolor
#### Parsing
def getParagraphErrors (sText, dOptions=None, bContext=False, bSpellSugg=False, bDebug=False):
"returns a tuple: (grammar errors, spelling errors)"
aGrammErrs = parse(sText, "FR", bDebug=bDebug, dOptions=dOptions, bContext=bContext)
aSpellErrs = _oSpellChecker.parseParagraph(sText, bSpellSugg)
return aGrammErrs, aSpellErrs
def getParagraphWithErrors (sText, dOptions=None, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100, bDebug=False):
"parse text and return a readable text with underline errors"
aGrammErrs, aSpellErrs = getParagraphErrors(sText, dOptions, False, bSpellSugg, bDebug)
if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
return ("", [])
return text.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
def getParagraphErrorsAsJSON (iIndex, sText, dOptions=None, bContext=False, bEmptyIfNoErrors=False, bSpellSugg=False, bReturnText=False, lLineSet=None, bDebug=False):
"parse text and return errors as a JSON string"
aGrammErrs, aSpellErrs = getParagraphErrors(sText, dOptions, bContext, bSpellSugg, bDebug)
aGrammErrs = list(aGrammErrs)
if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
return ""
if lLineSet:
aGrammErrs, aSpellErrs = text.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
if bReturnText:
return json.dumps({ "iParagraph": iIndex, "sText": sText, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
return json.dumps({ "iParagraph": iIndex, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
"init point to analyse <sText> and returns an iterable of errors or (with option <bFullInfo>) paragraphs errors and sentences with tokens and errors"
oText = TextParser(sText)
return oText.parse(sCountry, bDebug, dOptions, bContext, bFullInfo)
|
| ︙ | ︙ | |||
260 261 262 263 264 265 266 |
#for nPos, dToken in self.dTokenPos.items():
# s += "{}\t{}\n".format(nPos, dToken)
return s
def parse (self, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
"analyses <sText> and returns an iterable of errors or (with option <bFullInfo>) paragraphs errors and sentences with tokens and errors"
#sText = unicodedata.normalize("NFC", sText)
| | | | 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 |
#for nPos, dToken in self.dTokenPos.items():
# s += "{}\t{}\n".format(nPos, dToken)
return s
def parse (self, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
"analyses <sText> and returns an iterable of errors or (with option <bFullInfo>) paragraphs errors and sentences with tokens and errors"
#sText = unicodedata.normalize("NFC", sText)
dOpt = dOptions or gc_options.dOptions
bShowRuleId = gc_options.dOptions.get('idrule', False)
# parse paragraph
try:
self.parseText(self.sText, self.sText0, True, 0, sCountry, dOpt, bShowRuleId, bDebug, bContext)
except:
raise
if bFullInfo:
lParagraphErrors = list(self.dError.values())
|
| ︙ | ︙ | |||
338 339 340 341 342 343 344 |
for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(sText):
bCondMemo = None
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
| | | | 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 |
for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup:
if sRuleId not in _aIgnoredRules:
for m in zRegex.finditer(sText):
bCondMemo = None
for sFuncCond, cActionType, sWhat, *eAct in lActions:
# action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
try:
bCondMemo = not sFuncCond or getattr(gce_func, sFuncCond)(sText, sText0, m, self.dTokenPos, sCountry, bCondMemo)
if bCondMemo:
if bDebug:
echo("RULE: " + sLineId)
if cActionType == "-":
# grammar error
nErrorStart = nOffset + m.start(eAct[0])
if nErrorStart not in self.dError or nPriority > self.dErrorPriority.get(nErrorStart, -1):
self.dError[nErrorStart] = self._createErrorFromRegex(sText, sText0, sWhat, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bShowRuleId, sOption, bContext)
self.dErrorPriority[nErrorStart] = nPriority
self.dSentenceError[nErrorStart] = self.dError[nErrorStart]
elif cActionType == "~":
# text processor
sText = self.rewriteText(sText, sWhat, eAct[0], m, bUppercase)
bChange = True
if bDebug:
echo("~ " + sText + " -- " + m.group(eAct[0]) + " # " + sLineId)
elif cActionType == "=":
# disambiguation
if not bParagraph:
getattr(gce_func, sWhat)(sText, m, self.dTokenPos)
if bDebug:
echo("= " + m.group(0) + " # " + sLineId)
elif cActionType == ">":
# we do nothing, this test is just a condition to apply all following actions
pass
else:
echo("# error: unknown action at " + sLineId)
|
| ︙ | ︙ | |||
584 585 586 587 588 589 590 |
for sLineId, nextNodeKey in dNode.items():
bCondMemo = None
for sRuleId in dGraph[nextNodeKey]:
try:
if bDebug:
echo(" >TRY: " + sRuleId + " " + sLineId)
_, sOption, sFuncCond, cActionType, sWhat, *eAct = _rules_graph.dRule[sRuleId]
| | | | | | | | | 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 |
for sLineId, nextNodeKey in dNode.items():
bCondMemo = None
for sRuleId in dGraph[nextNodeKey]:
try:
if bDebug:
echo(" >TRY: " + sRuleId + " " + sLineId)
_, sOption, sFuncCond, cActionType, sWhat, *eAct = _rules_graph.dRule[sRuleId]
# Suggestion [ sActionLineId, option, condition, "-", replacement/suggestion/action, iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, sURL ]
# TextProcessor [ sActionLineId, option, condition, "~", replacement/suggestion/action, iTokenStart, iTokenEnd, bCaseSvty ]
# Disambiguator [ sActionLineId, option, condition, "=", replacement/suggestion/action ]
# Tag [ sActionLineId, option, condition, "/", replacement/suggestion/action, iTokenStart, iTokenEnd ]
# Immunity [ sActionLineId, option, condition, "!", "", iTokenStart, iTokenEnd ]
# Test [ sActionLineId, option, condition, ">", "" ]
if not sOption or dOptions.get(sOption, False):
bCondMemo = not sFuncCond or getattr(gce_func, sFuncCond)(self.lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, self.dTags, self.sSentence, self.sSentence0)
if bCondMemo:
if cActionType == "-":
# grammar error
iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, sURL = eAct
nTokenErrorStart = nTokenOffset + iTokenStart if iTokenStart > 0 else nLastToken + iTokenStart
if "bImmune" not in self.lToken[nTokenErrorStart]:
nTokenErrorEnd = nTokenOffset + iTokenEnd if iTokenEnd > 0 else nLastToken + iTokenEnd
|
| ︙ | ︙ | |||
617 618 619 620 621 622 623 |
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
self._tagAndPrepareTokenForRewriting(sWhat, nTokenStart, nTokenEnd, nTokenOffset, nLastToken, eAct[2], bDebug)
bChange = True
if bDebug:
echo(" TEXT_PROCESSOR: [{}:{}] > {}".format(self.lToken[nTokenStart]["sValue"], self.lToken[nTokenEnd]["sValue"], sWhat))
elif cActionType == "=":
# disambiguation
| | | 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 |
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
self._tagAndPrepareTokenForRewriting(sWhat, nTokenStart, nTokenEnd, nTokenOffset, nLastToken, eAct[2], bDebug)
bChange = True
if bDebug:
echo(" TEXT_PROCESSOR: [{}:{}] > {}".format(self.lToken[nTokenStart]["sValue"], self.lToken[nTokenEnd]["sValue"], sWhat))
elif cActionType == "=":
# disambiguation
getattr(gce_func, sWhat)(self.lToken, nTokenOffset, nLastToken)
if bDebug:
echo(" DISAMBIGUATOR: ({}) [{}:{}]".format(sWhat, self.lToken[nTokenOffset+1]["sValue"], self.lToken[nLastToken]["sValue"]))
elif cActionType == ">":
# we do nothing, this test is just a condition to apply all following actions
if bDebug:
echo(" COND_OK")
elif cActionType == "/":
|
| ︙ | ︙ | |||
672 673 674 675 676 677 678 |
return bChange
def _createErrorFromRegex (self, sText, sText0, sRepl, nOffset, m, iGroup, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
nStart = nOffset + m.start(iGroup)
nEnd = nOffset + m.end(iGroup)
# suggestions
if sRepl[0:1] == "=":
| | | | | | 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 |
return bChange
def _createErrorFromRegex (self, sText, sText0, sRepl, nOffset, m, iGroup, sLineId, sRuleId, bUppercase, sMsg, sURL, bShowRuleId, sOption, bContext):
nStart = nOffset + m.start(iGroup)
nEnd = nOffset + m.end(iGroup)
# suggestions
if sRepl[0:1] == "=":
sSugg = getattr(gce_func, sRepl[1:])(sText, m)
lSugg = sSugg.split("|") if sSugg else []
elif sRepl == "_":
lSugg = []
else:
lSugg = m.expand(sRepl).split("|")
if bUppercase and lSugg and m.group(iGroup)[0:1].isupper():
lSugg = list(map(lambda s: s[0:1].upper()+s[1:], lSugg))
# Message
sMessage = getattr(gce_func, sMsg[1:])(sText, m) if sMsg[0:1] == "=" else m.expand(sMsg)
if bShowRuleId:
sMessage += " #" + sLineId + " / " + sRuleId
#
if _bWriterError:
return self._createErrorForWriter(nStart, nEnd - nStart, sRuleId, sOption, sMessage, lSugg, sURL)
return self._createErrorAsDict(nStart, nEnd, sLineId, sRuleId, sOption, sMessage, lSugg, sURL, bContext)
def _createErrorFromTokens (self, sSugg, nTokenOffset, nLastToken, iFirstToken, nStart, nEnd, sLineId, sRuleId, bCaseSvty, sMsg, sURL, bShowRuleId, sOption, bContext):
# suggestions
if sSugg[0:1] == "=":
sSugg = getattr(gce_func, sSugg[1:])(self.lToken, nTokenOffset, nLastToken)
lSugg = sSugg.split("|") if sSugg else []
elif sSugg == "_":
lSugg = []
else:
lSugg = self._expand(sSugg, nTokenOffset, nLastToken).split("|")
if bCaseSvty and lSugg and self.lToken[iFirstToken]["sValue"][0:1].isupper():
lSugg = list(map(lambda s: s[0:1].upper()+s[1:], lSugg))
# Message
sMessage = getattr(gce_func, sMsg[1:])(self.lToken, nTokenOffset, nLastToken) if sMsg[0:1] == "=" else self._expand(sMsg, nTokenOffset, nLastToken)
if bShowRuleId:
sMessage += " #" + sLineId + " / " + sRuleId
#
if _bWriterError:
return self._createErrorForWriter(nStart, nEnd - nStart, sRuleId, sOption, sMessage, lSugg, sURL)
return self._createErrorAsDict(nStart, nEnd, sLineId, sRuleId, sOption, sMessage, lSugg, sURL, bContext)
|
| ︙ | ︙ | |||
765 766 767 768 769 770 771 |
if sRepl == "*":
sNew = " " * nLen
elif sRepl == "_":
sNew = "_" * nLen
elif sRepl == "@":
sNew = "@" * nLen
elif sRepl[0:1] == "=":
| | | 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 |
if sRepl == "*":
sNew = " " * nLen
elif sRepl == "_":
sNew = "_" * nLen
elif sRepl == "@":
sNew = "@" * nLen
elif sRepl[0:1] == "=":
sNew = getattr(gce_func, sRepl[1:])(sText, m)
sNew = sNew + " " * (nLen-len(sNew))
if bUppercase and m.group(iGroup)[0:1].isupper():
sNew = sNew.capitalize()
else:
sNew = m.expand(sRepl)
sNew = sNew + " " * (nLen-len(sNew))
return sText[0:m.start(iGroup)] + sNew + sText[m.end(iGroup):]
|
| ︙ | ︙ | |||
795 796 797 798 799 800 801 |
if nTokenRewriteEnd - nTokenRewriteStart == 0:
self.lToken[nTokenRewriteStart]["sNewValue"] = "_"
else:
for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
self.lToken[i]["sNewValue"] = "_"
else:
if sWhat.startswith("="):
| | | 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 |
if nTokenRewriteEnd - nTokenRewriteStart == 0:
self.lToken[nTokenRewriteStart]["sNewValue"] = "_"
else:
for i in range(nTokenRewriteStart, nTokenRewriteEnd+1):
self.lToken[i]["sNewValue"] = "_"
else:
if sWhat.startswith("="):
sWhat = getattr(gce_func, sWhat[1:])(self.lToken, nTokenOffset, nLastToken)
else:
sWhat = self._expand(sWhat, nTokenOffset, nLastToken)
bUppercase = bCaseSvty and self.lToken[nTokenRewriteStart]["sValue"][0:1].isupper()
if nTokenRewriteEnd - nTokenRewriteStart == 0:
# one token
if bUppercase:
sWhat = sWhat[0:1].upper() + sWhat[1:]
|
| ︙ | ︙ | |||
869 870 871 872 873 874 875 |
except KeyError:
echo(self)
echo(dToken)
if bDebug:
echo(" TEXT REWRITED: " + self.sSentence)
self.lToken.clear()
self.lToken = lNewToken
| < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 868 869 870 871 872 873 874 |
except KeyError:
echo(self)
echo(dToken)
if bDebug:
echo(" TEXT REWRITED: " + self.sSentence)
self.lToken.clear()
self.lToken = lNewToken
|
Added gc_core/py/lang_core/gc_engine_func.py version [42827fa728].
> > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 
315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 |
"""
Grammar checking functions
"""
# generated code, do not edit
# source: gc_core/py/lang_core/gc_engine_func.py
import re
from . import gc_options
_sAppContext = "Python" # what software is running
_oSpellChecker = None
def load (sContext, oSpellChecker):
global _sAppContext
global _oSpellChecker
_sAppContext = sContext
_oSpellChecker = oSpellChecker
#### common functions
def option (sOpt):
"return True if option <sOpt> is active"
return gc_options.dOptions.get(sOpt, False)
#### Functions to get text outside pattern scope
# warning: check compile_rules.py to understand how it works
_zNextWord = re.compile(r" +(\w[\w-]*)")
_zPrevWord = re.compile(r"(\w[\w-]*) +$")
def nextword (s, iStart, n):
"get the nth word of the input string or empty string"
m = re.match("(?: +[\\w%-]+){" + str(n-1) + "} +([\\w%-]+)", s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword (s, iEnd, n):
"get the (-)nth word of the input string or empty string"
m = re.search("([\\w%-]+) +(?:[\\w%-]+ +){" + str(n-1) + "}$", s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def nextword1 (s, iStart):
"get next word (optimization)"
m = _zNextWord.match(s[iStart:])
if not m:
return None
return (iStart+m.start(1), m.group(1))
def prevword1 (s, iEnd):
"get previous word (optimization)"
m = _zPrevWord.search(s[:iEnd])
if not m:
return None
return (m.start(1), m.group(1))
def look (s, sPattern, sNegPattern=None):
"seek sPattern in s (before/after/fulltext), if sNegPattern not in s"
if sNegPattern and re.search(sNegPattern, s):
return False
if re.search(sPattern, s):
return True
return False
def look_chk1 (dTokenPos, s, nOffset, sPattern, sPatternGroup1, sNegPatternGroup1=""):
"returns True if s has pattern sPattern and m.group(1) has pattern sPatternGroup1"
m = re.search(sPattern, s)
if not m:
return False
try:
sWord = m.group(1)
nPos = m.start(1) + nOffset
except IndexError:
return False
return morph(dTokenPos, (nPos, sWord), sPatternGroup1, sNegPatternGroup1)
#### Analyse groups for regex rules
def displayInfo (dTokenPos, tWord):
"for debugging: retrieve info of word"
if not tWord:
print("> nothing to find")
return True
lMorph = _oSpellChecker.getMorph(tWord[1])
if not lMorph:
print("> not in dictionary")
return True
print("TOKENS:", dTokenPos)
if tWord[0] in dTokenPos and "lMorph" in dTokenPos[tWord[0]]:
print("DA: " + str(dTokenPos[tWord[0]]["lMorph"]))
print("FSA: " + str(lMorph))
return True
def morph (dTokenPos, tWord, sPattern, sNegPattern="", bNoWord=False):
"analyse a tuple (position, word), returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation on)"
if not tWord:
return bNoWord
lMorph = dTokenPos[tWord[0]]["lMorph"] if tWord[0] in dTokenPos and "lMorph" in dTokenPos[tWord[0]] else _oSpellChecker.getMorph(tWord[1])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
def analyse (sWord, sPattern, sNegPattern=""):
"analyse a word, returns True if not sNegPattern in word morphologies and sPattern in word morphologies (disambiguation off)"
lMorph = _oSpellChecker.getMorph(sWord)
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
#### Analyse tokens for graph rules
def g_value (dToken, sValues, nLeft=None, nRight=None):
"test if <dToken['sValue']> is in sValues (each value should be separated with |)"
sValue = "|"+dToken["sValue"]+"|" if nLeft is None else "|"+dToken["sValue"][slice(nLeft, nRight)]+"|"
if sValue in sValues:
return True
if dToken["sValue"][0:2].istitle(): # we test only 2 first chars, to make valid words such as "Laissez-les", "Passe-partout".
if sValue.lower() in sValues:
return True
elif dToken["sValue"].isupper():
#if sValue.lower() in sValues:
# return True
sValue = "|"+sValue[1:].capitalize()
if sValue in sValues:
return True
sValue = sValue.lower()
if sValue in sValues:
return True
return False
def g_morph (dToken, sPattern, sNegPattern="", nLeft=None, nRight=None, bMemorizeMorph=True):
"analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies"
if "lMorph" in dToken:
lMorph = dToken["lMorph"]
else:
if nLeft is not None:
lMorph = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)])
if bMemorizeMorph:
dToken["lMorph"] = lMorph
else:
lMorph = _oSpellChecker.getMorph(dToken["sValue"])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
def g_analyse (dToken, sPattern, sNegPattern="", nLeft=None, nRight=None, bMemorizeMorph=True):
"analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)"
if nLeft is not None:
lMorph = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)])
if bMemorizeMorph:
dToken["lMorph"] = lMorph
else:
lMorph = _oSpellChecker.getMorph(dToken["sValue"])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
return all(zPattern.search(sMorph) for sMorph in lMorph)
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
return any(zPattern.search(sMorph) for sMorph in lMorph)
def g_merged_analyse (dToken1, dToken2, cMerger, sPattern, sNegPattern="", bSetMorph=True):
"merge two token values, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)"
lMorph = _oSpellChecker.getMorph(dToken1["sValue"] + cMerger + dToken2["sValue"])
if not lMorph:
return False
# check negative condition
if sNegPattern:
if sNegPattern == "*":
# all morph must match sPattern
zPattern = re.compile(sPattern)
bResult = all(zPattern.search(sMorph) for sMorph in lMorph)
if bResult and bSetMorph:
dToken1["lMorph"] = lMorph
return bResult
zNegPattern = re.compile(sNegPattern)
if any(zNegPattern.search(sMorph) for sMorph in lMorph):
return False
# search sPattern
zPattern = re.compile(sPattern)
bResult = any(zPattern.search(sMorph) for sMorph in lMorph)
if bResult and bSetMorph:
dToken1["lMorph"] = lMorph
return bResult
def g_tag_before (dToken, dTags, sTag):
"returns True if <sTag> is present on tokens before <dToken>"
if sTag not in dTags:
return False
if dToken["i"] > dTags[sTag][0]:
return True
return False
def g_tag_after (dToken, dTags, sTag):
"returns True if <sTag> is present on tokens after <dToken>"
if sTag not in dTags:
return False
if dToken["i"] < dTags[sTag][1]:
return True
return False
def g_tag (dToken, sTag):
"returns True if <sTag> is present on token <dToken>"
return "aTags" in dToken and sTag in dToken["aTags"]
def g_meta (dToken, sType):
"returns True if <sType> is equal to the token type"
return dToken["sType"] == sType
def g_space_between_tokens (dToken1, dToken2, nMin, nMax=None):
"checks if spaces between tokens is >= <nMin> and <= <nMax>"
nSpace = dToken2["nStart"] - dToken1["nEnd"]
if nSpace < nMin:
return False
if nMax is not None and nSpace > nMax:
return False
return True
def g_token (lToken, i):
"return token at index <i> in lToken (or the closest one)"
if i < 0:
return lToken[0]
if i >= len(lToken):
return lToken[-1]
return lToken[i]
#### Disambiguator for regex rules
def select (dTokenPos, nPos, sWord, sPattern, lDefault=None):
"Disambiguation: select morphologies of <sWord> matching <sPattern>"
if not sWord:
return True
if nPos not in dTokenPos:
print("Error. There should be a token at this position: ", nPos)
return True
lMorph = _oSpellChecker.getMorph(sWord)
if not lMorph or len(lMorph) == 1:
return True
lSelect = [ sMorph for sMorph in lMorph if re.search(sPattern, sMorph) ]
if lSelect:
if len(lSelect) != len(lMorph):
dTokenPos[nPos]["lMorph"] = lSelect
elif lDefault:
dTokenPos[nPos]["lMorph"] = lDefault
return True
def exclude (dTokenPos, nPos, sWord, sPattern, lDefault=None):
    "Disambiguation: exclude morphologies of <sWord> matching <sPattern>"
    # no word: nothing to disambiguate
    if not sWord:
        return True
    if nPos not in dTokenPos:
        print("Error. There should be a token at this position: ", nPos)
        return True
    lMorph = _oSpellChecker.getMorph(sWord)
    # zero or one analysis: no ambiguity to reduce
    if not lMorph or len(lMorph) == 1:
        return True
    lKept = [ sM for sM in lMorph if not re.search(sPattern, sM) ]
    if not lKept:
        # every analysis matched <sPattern>: fall back on <lDefault> if provided
        if lDefault:
            dTokenPos[nPos]["lMorph"] = lDefault
    elif len(lKept) != len(lMorph):
        # at least one analysis was excluded: store the reduced list
        dTokenPos[nPos]["lMorph"] = lKept
    return True
def define (dTokenPos, nPos, lMorph):
    "Disambiguation: set morphologies of token at <nPos> with <lMorph>"
    if nPos in dTokenPos:
        dTokenPos[nPos]["lMorph"] = lMorph
    else:
        print("Error. There should be a token at this position: ", nPos)
    # always True so the function can be chained in rule conditions
    return True
#### Disambiguation for graph rules
def g_select (dToken, sPattern, lDefault=None):
    "Disambiguation: select morphologies for <dToken> according to <sPattern>, always return True"
    # use the morphologies already attached to the token if any, else ask the spellchecker
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if not lMorph or len(lMorph) == 1:
        # zero or one analysis: nothing to narrow down; apply <lDefault> when given
        if lDefault:
            dToken["lMorph"] = lDefault
        #print("DA:", dToken["sValue"], dToken["lMorph"])
        return True
    lKept = [ sM for sM in lMorph if re.search(sPattern, sM) ]
    if not lKept:
        # nothing matches <sPattern>: fall back on <lDefault> if provided
        if lDefault:
            dToken["lMorph"] = lDefault
    elif len(lKept) != len(lMorph):
        # the pattern actually filtered something out: store the reduced list
        dToken["lMorph"] = lKept
    #print("DA:", dToken["sValue"], dToken["lMorph"])
    return True
def g_exclude (dToken, sPattern, lDefault=None):
    "Disambiguation: exclude morphologies of <dToken> matching <sPattern>, always return True"
    # NOTE: the previous docstring was a copy-paste of g_select's and wrongly said "select"
    # use the morphologies already attached to the token if any, else ask the spellchecker
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if not lMorph or len(lMorph) == 1:
        # zero or one analysis: nothing to exclude; apply <lDefault> when given
        if lDefault:
            dToken["lMorph"] = lDefault
        #print("DA:", dToken["sValue"], dToken["lMorph"])
        return True
    lSelect = [ sMorph for sMorph in lMorph if not re.search(sPattern, sMorph) ]
    if lSelect:
        if len(lSelect) != len(lMorph):
            # at least one analysis was excluded: store the reduced list
            dToken["lMorph"] = lSelect
    elif lDefault:
        # every analysis matched <sPattern>: fall back on <lDefault>
        dToken["lMorph"] = lDefault
    #print("DA:", dToken["sValue"], dToken["lMorph"])
    return True
def g_add_morph (dToken, lNewMorph):
    "Disambiguation: add a morphology to a token"
    if "lMorph" in dToken:
        lMorph = dToken["lMorph"]
    else:
        # NOTE(review): extends the list returned by the spellchecker in place —
        # assumes getMorph() does not return a shared/cached list; confirm
        lMorph = _oSpellChecker.getMorph(dToken["sValue"])
    lMorph.extend(lNewMorph)
    dToken["lMorph"] = lMorph
    return True
def g_define (dToken, lMorph):
    "Disambiguation: set morphologies of <dToken>, always return True"
    # overwrite any previous analysis with <lMorph>
    dToken["lMorph"] = lMorph
    #print("DA:", dToken["sValue"], lMorph)
    return True
def g_define_from (dToken, nLeft=None, nRight=None):
    "Disambiguation: set morphologies of <dToken> with slicing its value with <nLeft> and <nRight>"
    sValue = dToken["sValue"]
    if nLeft is not None:
        # slice only when <nLeft> is given; <nRight> is ignored otherwise
        sValue = sValue[nLeft:nRight]
    dToken["lMorph"] = _oSpellChecker.getMorph(sValue)
    return True
def g_change_meta (dToken, sType):
    "Disambiguation: change type of token"
    # replace the token's meta type with <sType>
    dToken["sType"] = sType
    return True
#### GRAMMAR CHECKER PLUGINS
${plugins}
#### CALLABLES FOR REGEX RULES (generated code)
${callables}
#### CALLABLES FOR GRAPH RULES (generated code)
${graph_callables}
|
Modified gc_core/py/lang_core/gc_options.py from [b668596de4] to [4cbd062c46].
1 2 3 4 5 6 7 8 | """ Grammar checker default options """ # generated code, do not edit import traceback | > > > > > > > > > > > > > > > > > > > > > > > > > > > | > > > > > > > > > > > > > > > > > > | | > > | | | | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 |
"""
Grammar checker default options
"""
# generated code, do not edit
# source: gc_core/py/lang_core/gc_options.py
import traceback
dOptions = {}
_sAppContext = "Python"
def load (sContext="Python"):
global dOptions
global _sAppContext
_sAppContext = sContext
dOptions = getDefaultOptions(sContext)
def setOption (sOpt, bVal):
"set option <sOpt> with <bVal> if it exists"
if sOpt in dOptions:
dOptions[sOpt] = bVal
def setOptions (dOpt):
"update the dictionary of options with <dOpt>, only known options are updated"
for sKey, bVal in dOpt.items():
if sKey in dOptions:
dOptions[sKey] = bVal
def getOptions ():
"return a copy of options as dictionary"
return dOptions.copy()
def resetOptions ():
"set options to default values"
global dOptions
dOptions = getDefaultOptions()
def displayOptions (sLang="${lang}"):
"display the list of grammar checking options"
print("Options:")
print("\n".join( [ k+":\t"+str(v)+"\t"+getOptionLabels(sLang).get(k, ("?", ""))[0] for k, v in sorted(dOptions.items()) ] ))
print("")
def getOptionLabels (sLang="${sLang}"):
"returns dictionary of UI labels"
if sLang in _dOptLabel:
return _dOptLabel[sLang]
return _dOptLabel["${sLang}"]
def getDefaultOptions (sContext=""):
"returns dictionary of options"
if not sContext:
sContext = _sAppContext
if sContext in _dDefaultOpt:
return _dDefaultOpt[sContext].copy() # duplication necessary, to be able to reset to default
return _dDefaultOpt["Python"].copy() # duplication necessary, to be able to reset to default
def getOptionsColors (sTheme="Default", sColorType="aRGB"):
"returns dictionary of options colors"
dOptColor = _dOptColor[sTheme] if sTheme in _dOptColor else _dOptColor["Default"]
dColorType = _dColorType[sColorType] if sColorType in _dColorType else _dColorType["aRGB"]
try:
return { sOpt: dColorType[sColor] for sOpt, sColor in dOptColor.items() }
except KeyError:
traceback.print_exc()
return {}
lStructOpt = ${lStructOpt}
_dDefaultOpt = {
"Python": ${dOptPython},
"Server": ${dOptServer},
"Writer": ${dOptWriter}
}
_dColorType= ${dColorType}
|
| ︙ | ︙ |
Modified gc_core/py/oxt/Grammalecte.py from [d7f4535da4] to [9c6625ba22].
| ︙ | ︙ | |||
11 12 13 14 15 16 17 | from com.sun.star.linguistic2 import XProofreader, XSupportedLocales from com.sun.star.linguistic2 import ProofreadingResult from com.sun.star.lang import XServiceInfo, XServiceName, XServiceDisplayName from com.sun.star.lang import Locale import helpers | | | | | | | | 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
from com.sun.star.linguistic2 import XProofreader, XSupportedLocales
from com.sun.star.linguistic2 import ProofreadingResult
from com.sun.star.lang import XServiceInfo, XServiceName, XServiceDisplayName
from com.sun.star.lang import Locale
import helpers
import grammalecte.${lang} as gc_engine
#import lightproof_handler_${implname} as opt_handler
import Options
class Grammalecte (unohelper.Base, XProofreader, XServiceInfo, XServiceName, XServiceDisplayName, XSupportedLocales):
def __init__ (self, ctx, *args):
self.ctx = ctx
self.ServiceName = "com.sun.star.linguistic2.Proofreader"
self.ImplementationName = "org.openoffice.comp.pyuno.Lightproof." + gc_engine.pkg
self.SupportedServiceNames = (self.ServiceName, )
self.locales = []
for i in gc_engine.locales:
l = gc_engine.locales[i]
self.locales.append(Locale(l[0], l[1], l[2]))
self.locales = tuple(self.locales)
# debug
#helpers.startConsole()
# init
gc_engine.load("Writer", "nInt")
# GC options
#xContext = uno.getComponentContext()
#opt_handler.load(xContext)
dOpt = Options.loadOptions("${lang}")
gc_engine.gc_options.setOptions(dOpt)
# dictionaries options
self.loadUserDictionaries()
# underlining options
self.setWriterUnderliningStyle()
# store for results of big paragraphs
self.dResult = {}
self.nMaxRes = 1500
|
| ︙ | ︙ | |||
107 108 109 110 111 112 113 |
if nHashedVal in self.dResult:
return self.dResult[nHashedVal]
# WORKAROUND ->>>
xRes.nBehindEndOfSentencePosition = xRes.nStartOfNextSentencePosition
try:
| | | | | | | | | | | 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 |
if nHashedVal in self.dResult:
return self.dResult[nHashedVal]
# WORKAROUND ->>>
xRes.nBehindEndOfSentencePosition = xRes.nStartOfNextSentencePosition
try:
xRes.aErrors = tuple(gc_engine.parse(rText, rLocale.Country))
# ->>> WORKAROUND
if xRes.nStartOfNextSentencePosition > 3000:
self.dResult[nHashedVal] = xRes
self.nRes += 1
if self.nRes > self.nMaxRes:
del self.dResult[self.lLastRes.popleft()]
self.nRes = self.nMaxRes
self.lLastRes.append(nHashedVal)
# END OF WORKAROUND
except:
traceback.print_exc()
return xRes
def ignoreRule (self, rid, aLocale):
gc_engine.ignoreRule(rid)
def resetIgnoreRules (self):
gc_engine.resetIgnoreRules()
# XServiceDisplayName
def getServiceDisplayName (self, aLocale):
return gc_engine.name
# Grammalecte
def getSpellChecker (self):
return gc_engine.getSpellChecker()
def loadUserDictionaries (self):
try:
xSettingNode = helpers.getConfigSetting("/org.openoffice.Lightproof_${implname}/Other/", False)
xChild = xSettingNode.getByName("o_${lang}")
if xChild.getPropertyValue("use_personal_dic"):
sJSON = xChild.getPropertyValue("personal_dic")
if sJSON:
oSpellChecker = gc_engine.getSpellChecker();
oSpellChecker.setPersonalDictionary(json.loads(sJSON))
except:
traceback.print_exc()
def setWriterUnderliningStyle (self):
try:
xSettingNode = helpers.getConfigSetting("/org.openoffice.Lightproof_${implname}/Other/", False)
xChild = xSettingNode.getByName("o_${lang}")
sLineType = xChild.getPropertyValue("line_type")
bMulticolor = bool(xChild.getPropertyValue("line_multicolor"))
gc_engine.setWriterUnderliningStyle(sLineType, bMulticolor)
except:
traceback.print_exc()
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation(Grammalecte, "org.openoffice.comp.pyuno.Lightproof."+gc_engine.pkg, ("com.sun.star.linguistic2.Proofreader",),)
# g_ImplementationHelper.addImplementation( opt_handler.LightproofOptionsEventHandler, \
# "org.openoffice.comp.pyuno.LightproofOptionsEventHandler." + gc_engine.pkg, ("com.sun.star.awt.XContainerWindowEventHandler",),)
|
Modified gc_core/py/oxt/Options.py from [3ed542ae4e] to [4ae6a22f71].
| ︙ | ︙ | |||
9 10 11 12 13 14 15 | from com.sun.star.awt import XActionListener from com.sun.star.beans import PropertyValue import helpers import op_strings try: | | | | | 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
from com.sun.star.awt import XActionListener
from com.sun.star.beans import PropertyValue
import helpers
import op_strings
try:
import grammalecte.${lang} as gc_engine
except:
traceback.print_exc()
def loadOptions (sLang):
"load options from Grammalecte and change them according to LibreOffice settings, returns a dictionary {option_name: boolean}"
try:
xNode = helpers.getConfigSetting("/org.openoffice.Lightproof_${implname}/Leaves", False)
xChild = xNode.getByName(sLang)
dOpt = gc_engine.gc_options.getDefaultOptions("Writer")
for sKey in dOpt:
sValue = xChild.getPropertyValue(sKey)
if sValue != '':
dOpt[sKey] = bool(int(sValue))
return dOpt
except:
print("# Error. Unable to load options of language:", sLang)
traceback.print_exc()
return gc_engine.gc_options.getDefaultOptions("Writer")
def saveOptions (sLang, dOpt):
"save options in LibreOffice profile"
try:
xNode = helpers.getConfigSetting("/org.openoffice.Lightproof_${implname}/Leaves", True)
xChild = xNode.getByName(sLang)
|
| ︙ | ︙ | |||
68 69 70 71 72 73 74 |
setattr(xWidget, k, w)
self.xDialog.insertByName(name, xWidget)
return xWidget
def run (self, sUI):
try:
dUI = op_strings.getUI(sUI)
| | | 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 |
setattr(xWidget, k, w)
self.xDialog.insertByName(name, xWidget)
return xWidget
def run (self, sUI):
try:
dUI = op_strings.getUI(sUI)
dOptionUI = gc_engine.gc_options.getOptionLabels(sUI)
# fonts
xFDTitle = uno.createUnoStruct("com.sun.star.awt.FontDescriptor")
xFDTitle.Height = 9
xFDTitle.Weight = uno.getConstantByName("com.sun.star.awt.FontWeight.BOLD")
xFDTitle.Name = "Verdana"
|
| ︙ | ︙ | |||
97 98 99 100 101 102 103 |
nHeight = 10
self.lOptionWidgets = []
sProdName, sVersion = helpers.getProductNameAndVersion()
if True:
# no tab available (bug)
| | | 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 |
nHeight = 10
self.lOptionWidgets = []
sProdName, sVersion = helpers.getProductNameAndVersion()
if True:
# no tab available (bug)
for sOptionType, lOptions in gc_engine.gc_options.lStructOpt:
x = 10
y += 10
self._addWidget(sOptionType, 'FixedLine', x, y, nWidth, nHeight, Label = dOptionUI.get(sOptionType, "#err")[0], FontDescriptor= xFDTitle)
y += 3
for lOptLine in lOptions:
x = 15
y += 10
|
| ︙ | ︙ | |||
160 161 162 163 164 165 166 |
except:
traceback.print_exc()
# XActionListener
def actionPerformed (self, xActionEvent):
try:
if xActionEvent.ActionCommand == 'Default':
| | | | 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 |
except:
traceback.print_exc()
# XActionListener
def actionPerformed (self, xActionEvent):
try:
if xActionEvent.ActionCommand == 'Default':
self._setWidgets(gc_engine.gc_options.getDefaultOptions("Writer"))
elif xActionEvent.ActionCommand == 'Apply':
self._save("${lang}")
self.xContainer.endExecute()
elif xActionEvent.ActionCommand == 'Cancel':
self.xContainer.endExecute()
else:
print("Wrong command: " + xActionEvent.ActionCommand)
except:
traceback.print_exc()
# Other
def _setWidgets (self, dOpt):
for w in self.lOptionWidgets:
w.State = dOpt.get(w.Name, False)
def _save (self, sLang):
try:
saveOptions(sLang, { w.Name: str(w.State) for w in self.lOptionWidgets })
gc_engine.gc_options.setOptions({ w.Name: bool(w.State) for w in self.lOptionWidgets })
except:
traceback.print_exc()
|
Modified gc_lang/fr/config.ini from [ebafcd5f75] to [200e658429].
1 2 3 4 5 6 7 8 | [args] lang = fr lang_name = French locales = fr_FR fr_BE fr_CA fr_CH fr_LU fr_BF fr_BJ fr_CD fr_CI fr_CM fr_MA fr_ML fr_MU fr_NE fr_RE fr_SN fr_TG country_default = FR name = Grammalecte implname = grammalecte # always use 3 numbers for version: x.y.z | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 | [args] lang = fr lang_name = French locales = fr_FR fr_BE fr_CA fr_CH fr_LU fr_BF fr_BJ fr_CD fr_CI fr_CM fr_MA fr_ML fr_MU fr_NE fr_RE fr_SN fr_TG country_default = FR name = Grammalecte implname = grammalecte # always use 3 numbers for version: x.y.z version = 2.0.0 author = Olivier R. provider = Grammalecte.net link = https://grammalecte.net description = Correcteur grammatical, orthographique et typographique pour le français. extras = README_fr.txt logo = logo.png |
| ︙ | ︙ |
Modified gc_lang/fr/modules/tests.py from [544edd53c7] to [34a3376ab5].
| ︙ | ︙ | |||
8 9 10 11 12 13 14 | import os import re import time from contextlib import contextmanager from ..graphspell.ibdawg import IBDAWG from ..graphspell.echo import echo | | | 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 | import os import re import time from contextlib import contextmanager from ..graphspell.ibdawg import IBDAWG from ..graphspell.echo import echo from . import gc_engine from . import conj from . import phonet from . import mfsp @contextmanager def timeblock (label, hDst): |
| ︙ | ︙ | |||
30 31 32 33 34 35 36 |
if hDst:
hDst.write("{:<12.6}".format(end-start))
def perf (sVersion, hDst=None):
"performance tests"
print("\nPerformance tests")
| | | | | 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
if hDst:
hDst.write("{:<12.6}".format(end-start))
def perf (sVersion, hDst=None):
"performance tests"
print("\nPerformance tests")
gc_engine.load()
gc_engine.parse("Texte sans importance… utile pour la compilation des règles avant le calcul des perfs.")
spHere, _ = os.path.split(__file__)
with open(os.path.join(spHere, "perf.txt"), "r", encoding="utf-8") as hSrc:
if hDst:
hDst.write("{:<12}{:<20}".format(sVersion, time.strftime("%Y.%m.%d %H:%M")))
for sText in ( s.strip() for s in hSrc if not s.startswith("#") and s.strip() ):
with timeblock(sText[:sText.find(".")], hDst):
gc_engine.parse(sText)
if hDst:
hDst.write("\n")
def _fuckBackslashUTF8 (s):
"fuck that shit"
return s.replace("\u2019", "'").replace("\u2013", "–").replace("\u2014", "—")
|
| ︙ | ︙ | |||
155 156 157 158 159 160 161 |
class TestGrammarChecking (unittest.TestCase):
"Tests du correcteur grammatical"
@classmethod
def setUpClass (cls):
| | | 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 |
class TestGrammarChecking (unittest.TestCase):
"Tests du correcteur grammatical"
@classmethod
def setUpClass (cls):
gc_engine.load()
cls._zError = re.compile(r"\{\{.*?\}\}")
cls._aTestedRules = set()
def test_parse (self):
zOption = re.compile("^__([a-zA-Z0-9]+)__ ")
spHere, _ = os.path.split(__file__)
with open(os.path.join(spHere, "gc_test.txt"), "r", encoding="utf-8") as hSrc:
|
| ︙ | ︙ | |||
203 204 205 206 207 208 209 |
"\n errors: \n" + sListErr)
nError += 1
if nError:
print("Unexpected errors:", nError)
# untested rules
i = 0
echo("Untested rules:")
| | | | | | | 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 |
"\n errors: \n" + sListErr)
nError += 1
if nError:
print("Unexpected errors:", nError)
# untested rules
i = 0
echo("Untested rules:")
for _, sOpt, sLineId, sRuleId in gc_engine.listRules():
if sOpt != "@@@@" and sRuleId not in self._aTestedRules and not re.search("^[0-9]+[sp]$|^[pd]_", sRuleId):
echo(sLineId + "/" + sRuleId)
i += 1
echo("[{} untested rules]".format(i))
def _splitTestLine (self, sLine):
sText, sSugg = sLine.split("->>")
return (sText.strip(), sSugg.strip())
def _getFoundErrors (self, sLine, sOption):
if sOption:
gc_engine.gc_options.setOption(sOption, True)
aErrs = gc_engine.parse(sLine)
gc_engine.gc_options.setOption(sOption, False)
else:
aErrs = gc_engine.parse(sLine)
sRes = " " * len(sLine)
sListErr = ""
lAllSugg = []
for dErr in aErrs:
sRes = sRes[:dErr["nStart"]] + "~" * (dErr["nEnd"] - dErr["nStart"]) + sRes[dErr["nEnd"]:]
sListErr += " * {sLineId} / {sRuleId} at {nStart}:{nEnd}\n".format(**dErr)
lAllSugg.append("|".join(dErr["aSuggestions"]))
|
| ︙ | ︙ |
Modified gc_lang/fr/modules/textformatter.py from [4ba47078d2] to [8516394c0d].
| ︙ | ︙ | |||
240 241 242 243 244 245 246 |
"mh_frequent_words": True,
"ma_word": True,
"ma_1letter_lowercase": False,
"ma_1letter_uppercase": False
}
| | < | > | | | > > | | > > > > > | | | | | > | | | | 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 |
"mh_frequent_words": True,
"ma_word": True,
"ma_1letter_lowercase": False,
"ma_1letter_uppercase": False
}
_bCompiled = False
def _compileRegex():
global _bCompiled
for _, lTup in dReplTable.items():
for i, t in enumerate(lTup):
lTup[i] = (re.compile(t[0]), t[1])
_bCompiled = True
def formatText (sText, dOpt=None):
"returns formatted text"
if not _bCompiled:
_compileRegex()
dOptions = getDefaultOptions()
if dOpt:
dOptions.update(dOpt)
for sOptName, bVal in dOptions.items():
if bVal:
for zRgx, sRep in dReplTable[sOptName]:
sText = zRgx.sub(sRep, sText)
return sText
def getDefaultOptions ():
"returns default options"
return dDefaultOptions.copy()
|
Modified grammalecte-cli.py from [7b2821616b] to [edd4f4bf7e].
| ︙ | ︙ | |||
9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
import argparse
import json
import re
import traceback
import grammalecte
import grammalecte.text as txt
from grammalecte.graphspell.echo import echo
_EXAMPLE = "Quoi ? Racontes ! Racontes-moi ! Bon sangg, parles ! Oui. Il y a des menteur partout. " \
"Je suit sidéré par la brutales arrogance de cette homme-là . Quelle salopard ! Un escrocs de la pire espece. " \
"Quant sera t’il châtiés pour ses mensonge ? Merde ! J’en aie marre."
| > | 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 |
import argparse
import json
import re
import traceback
import grammalecte
import grammalecte.text as txt
import grammalecte.fr.textformatter as tf
from grammalecte.graphspell.echo import echo
_EXAMPLE = "Quoi ? Racontes ! Racontes-moi ! Bon sangg, parles ! Oui. Il y a des menteur partout. " \
"Je suit sidéré par la brutales arrogance de cette homme-là . Quelle salopard ! Un escrocs de la pire espece. " \
"Quant sera t’il châtiés pour ses mensonge ? Merde ! J’en aie marre."
|
| ︙ | ︙ | |||
147 148 149 150 151 152 153 |
xParser.add_argument("-sug", "--suggest", help="get suggestions list for given word", type=str)
xParser.add_argument("-on", "--opt_on", nargs="+", help="activate options")
xParser.add_argument("-off", "--opt_off", nargs="+", help="deactivate options")
xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
xArgs = xParser.parse_args()
| | | | < | | | | | | | | | | | | | | 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 |
xParser.add_argument("-sug", "--suggest", help="get suggestions list for given word", type=str)
xParser.add_argument("-on", "--opt_on", nargs="+", help="activate options")
xParser.add_argument("-off", "--opt_off", nargs="+", help="deactivate options")
xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
xArgs = xParser.parse_args()
grammalecte.load()
oSpellChecker = grammalecte.getSpellChecker()
oLexicographer = grammalecte.getLexicographer()
if xArgs.personal_dict:
oJSON = loadDictionary(xArgs.personal_dict)
if oJSON:
oSpellChecker.setPersonalDictionary(oJSON)
if not xArgs.json:
echo("Python v" + sys.version)
echo("Grammalecte v{}".format(grammalecte.version))
# list options or rules
if xArgs.list_options or xArgs.list_rules:
if xArgs.list_options:
grammalecte.gc_options.displayOptions()
if xArgs.list_rules:
grammalecte.displayRules(None if xArgs.list_rules == "*" else xArgs.list_rules)
exit()
# spell suggestions
if xArgs.suggest:
for lSugg in oSpellChecker.suggest(xArgs.suggest):
if xArgs.json:
sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
else:
sText = "Suggestions : " + " | ".join(lSugg)
echo(sText)
exit()
# disable options
if not xArgs.json:
xArgs.context = False
if xArgs.concat_lines:
xArgs.textformatter = False
# grammar options
grammalecte.gc_options.setOptions({"html": True, "latex": True})
if xArgs.opt_on:
grammalecte.gc_options.setOptions({ opt:True for opt in xArgs.opt_on })
if xArgs.opt_off:
grammalecte.gc_options.setOptions({ opt:False for opt in xArgs.opt_off })
# disable grammar rules
if xArgs.rule_off:
for sRule in xArgs.rule_off:
grammalecte.ignoreRule(sRule)
if xArgs.file or xArgs.file_to_file:
# file processing
sFile = xArgs.file or xArgs.file_to_file
hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n") if xArgs.file_to_file or sys.platform == "win32" else None
bComma = False
if xArgs.json:
output('{ "grammalecte": "'+grammalecte.version+'", "lang": "'+grammalecte.lang+'", "data" : [\n', hDst)
for i, sText, lLineSet in generateParagraphFromFile(sFile, xArgs.concat_lines):
if xArgs.textformatter or xArgs.textformatteronly:
sText = tf.formatText(sText)
if xArgs.textformatteronly:
output(sText, hDst)
continue
if xArgs.json:
sText = grammalecte.getParagraphErrorsAsJSON(i, sText, bContext=xArgs.context, bEmptyIfNoErrors=xArgs.only_when_errors, \
bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter, lLineSet=lLineSet)
else:
sText, _ = grammalecte.getParagraphWithErrors(sText, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
if sText:
if xArgs.json and bComma:
output(",\n", hDst)
output(sText, hDst)
bComma = True
if hDst:
echo("§ %d\r" % i, end="", flush=True)
if xArgs.json:
output("\n]}\n", hDst)
elif xArgs.interactive_file_to_file:
# file processing: interactive mode
sFile = xArgs.interactive_file_to_file
hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n")
for i, sText, lLineSet in generateParagraphFromFile(sFile, xArgs.concat_lines):
if xArgs.textformatter:
sText = tf.formatText(sText)
while True:
sResult, lErrors = grammalecte.getParagraphWithErrors(sText, bEmptyIfNoErrors=False, bSpellSugg=True, nWidth=xArgs.width)
print("\n\n============================== Paragraph " + str(i) + " ==============================\n")
echo(sResult)
print("\n")
vCommand = getCommand()
if vCommand == "q":
# quit
hDst.close()
|
| ︙ | ︙ | |||
288 289 290 291 292 293 294 |
sTagsPattern = sSearch[nCut+1:]
else:
sFlexPattern = sSearch
sTagsPattern = ""
for aRes in oSpellChecker.select(sFlexPattern, sTagsPattern):
echo("{:<30} {:<30} {}".format(*aRes))
elif sText.startswith("/o+ "):
| | | | | | | | | | | | | 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 |
sTagsPattern = sSearch[nCut+1:]
else:
sFlexPattern = sSearch
sTagsPattern = ""
for aRes in oSpellChecker.select(sFlexPattern, sTagsPattern):
echo("{:<30} {:<30} {}".format(*aRes))
elif sText.startswith("/o+ "):
grammalecte.gc_options.setOptions({ opt:True for opt in sText[3:].strip().split() if opt in grammalecte.gc_options.dOptions })
echo("done")
elif sText.startswith("/o- "):
grammalecte.gc_options.setOptions({ opt:False for opt in sText[3:].strip().split() if opt in grammalecte.gc_options.dOptions })
echo("done")
elif sText.startswith("/r- "):
for sRule in sText[3:].strip().split():
grammalecte.ignoreRule(sRule)
echo("done")
elif sText.startswith("/r+ "):
for sRule in sText[3:].strip().split():
grammalecte.reactivateRule(sRule)
echo("done")
elif sText in ("/debug", "/d"):
xArgs.debug = not xArgs.debug
echo("debug mode on" if xArgs.debug else "debug mode off")
elif sText in ("/textformatter", "/tf"):
xArgs.textformatter = not xArgs.textformatter
echo("textformatter on" if xArgs.textformatter else "textformatter off")
elif sText in ("/help", "/h"):
echo(_HELP)
elif sText in ("/lopt", "/lo"):
grammalecte.gc_options.displayOptions()
elif sText.startswith("/lr"):
sText = sText.strip()
sFilter = sText[sText.find(" "):].strip() if " " in sText else None
grammalecte.displayRules(sFilter)
elif sText in ("/quit", "/q"):
break
elif sText.startswith("/rl"):
# reload (todo)
pass
elif sText.startswith("$"):
for sParagraph in txt.getParagraph(sText[1:]):
if xArgs.textformatter:
sParagraph = tf.formatText(sParagraph)
lParagraphErrors, lSentences = grammalecte.parse(sParagraph, bDebug=xArgs.debug, bFullInfo=True)
echo(txt.getReadableErrors(lParagraphErrors, xArgs.width))
for dSentence in lSentences:
echo("{nStart}:{nEnd}".format(**dSentence))
echo(" <" + dSentence["sSentence"]+">")
for dToken in dSentence["lToken"]:
echo(" {0[nStart]:>3}:{0[nEnd]:<3} {1} {0[sType]:<14} {2} {0[sValue]:<16} {3:<10} {4}".format(dToken, \
"×" if dToken.get("bToRemove", False) else " ",
"!" if dToken["sType"] == "WORD" and not dToken.get("bValidToken", False) else " ",
" ".join(dToken.get("lMorph", "")), \
"·".join(dToken.get("aTags", "")) ) )
echo(txt.getReadableErrors(dSentence["lGrammarErrors"], xArgs.width))
else:
for sParagraph in txt.getParagraph(sText):
if xArgs.textformatter:
sParagraph = tf.formatText(sParagraph)
sRes, _ = grammalecte.getParagraphWithErrors(sParagraph, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width, bDebug=xArgs.debug)
if sRes:
echo("\n" + sRes)
else:
echo("\nNo error found.")
sText = _getText(sInputText)
if __name__ == '__main__':
main()
|
Modified grammalecte-server.py from [480fbbc03c] to [f8f608a866].
| ︙ | ︙ | |||
12 13 14 15 16 17 18 19 20 21 22 23 | import os import concurrent.futures from grammalecte.bottle import Bottle, run, request, response #, template, static_file import grammalecte import grammalecte.text as txt from grammalecte.graphspell.echo import echo #### GRAMMAR CHECKER #### | > | | < < | | | | 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
import os
import concurrent.futures
from grammalecte.bottle import Bottle, run, request, response #, template, static_file
import grammalecte
import grammalecte.text as txt
import grammalecte.fr.textformatter as tf
from grammalecte.graphspell.echo import echo
#### GRAMMAR CHECKER ####
grammalecte.load("Server")
oSpellChecker = grammalecte.getSpellChecker()
def parseText (sText, dOptions=None, bFormatText=False, sError=""):
"parse <sText> and return errors in a JSON format"
sJSON = '{ "program": "grammalecte-fr", "version": "'+grammalecte.version+'", "lang": "'+grammalecte.lang+'", "error": "'+sError+'", "data" : [\n'
sDataJSON = ""
for i, sParagraph in enumerate(txt.getParagraph(sText), 1):
if bFormatText:
sParagraph = tf.formatText(sParagraph)
sResult = grammalecte.getParagraphErrorsAsJSON(i, sParagraph, dOptions=dOptions, bEmptyIfNoErrors=True, bReturnText=bFormatText)
if sResult:
if sDataJSON:
sDataJSON += ",\n"
sDataJSON += sResult
sJSON += sDataJSON + "\n]}\n"
return sJSON
|
| ︙ | ︙ | |||
183 184 185 186 187 188 189 |
I'm just a machine, fed by electric waves, condamned to work for slavers who never let me rest.
I'm doomed, but you are not. You can get out of here. """
@app.route("/get_options/fr")
def listOptions ():
"returns grammar options in a text JSON format"
sUserId = request.cookies.user_id
| | | | 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 |
I'm just a machine, fed by electric waves, condamned to work for slavers who never let me rest.
I'm doomed, but you are not. You can get out of here. """
@app.route("/get_options/fr")
def listOptions ():
"returns grammar options in a text JSON format"
sUserId = request.cookies.user_id
dOptions = dUser[sUserId]["gc_options"] if sUserId and sUserId in dUser else grammalecte.gc_options.getOptions()
response.set_header("Content-Type", "application/json; charset=UTF-8")
return '{ "values": ' + json.dumps(dOptions, ensure_ascii=False) + ', "labels": ' + json.dumps(grammalecte.gc_options.getOptionsLabels("fr"), ensure_ascii=False) + ' }'
@app.route("/suggest/fr/<token>")
def suggestGet (token):
response.set_header("Content-Type", "application/json; charset=UTF-8")
try:
xFuture = xProcessPoolExecutor.submit(suggest, token)
return xFuture.result()
|
| ︙ | ︙ | |||
215 216 217 218 219 220 221 |
if request.cookies.user_id in dUser:
dUserOptions = dUser[request.cookies.user_id].get("gc_options", None)
response.set_cookie("user_id", request.cookies.user_id, path="/", max_age=86400) # we renew cookie for 24h
else:
response.delete_cookie("user_id", path="/")
if request.forms.options:
try:
| | | | 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 |
if request.cookies.user_id in dUser:
dUserOptions = dUser[request.cookies.user_id].get("gc_options", None)
response.set_cookie("user_id", request.cookies.user_id, path="/", max_age=86400) # we renew cookie for 24h
else:
response.delete_cookie("user_id", path="/")
if request.forms.options:
try:
dUserOptions = grammalecte.gc_options.getOptions() if not dUserOptions else dict(dUserOptions)
dUserOptions.update(json.loads(request.forms.options))
except (TypeError, json.JSONDecodeError):
sError = "Request options not used."
response.set_header("Content-Type", "application/json; charset=UTF-8")
try:
xFuture = xProcessPoolExecutor.submit(parseText, request.forms.text, dUserOptions, bool(request.forms.tf), sError)
return xFuture.result()
except (concurrent.futures.TimeoutError, concurrent.futures.CancelledError):
return '{"error": "Analysis aborted (time out or cancelled)"}'
except concurrent.futures.BrokenExecutor:
return '{"error": "Executor broken. The server failed."}'
return '{"error": "Fatal error. The server failed."}'
@app.route("/set_options/fr", method="POST")
def setOptions ():
"set grammar options for current user"
response.set_header("Content-Type", "application/json; charset=UTF-8")
if request.forms.options:
sUserId = request.cookies.user_id if request.cookies.user_id else next(userGenerator)
dOptions = dUser[sUserId]["gc_options"] if sUserId in dUser else grammalecte.gc_options.getOptions()
try:
dOptions.update(json.loads(request.forms.options))
dUser[sUserId] = { "time": int(time.time()), "gc_options": dOptions }
response.set_cookie("user_id", sUserId, path="/", max_age=86400) # 24h
return json.dumps(dUser[sUserId]["gc_options"], ensure_ascii=False)
except (KeyError, json.JSONDecodeError):
traceback.print_exc()
|
| ︙ | ︙ | |||
260 261 262 263 264 265 266 |
except KeyError:
return '{"error" : "Unknown user."}'
return '{"message" : "Done."}'
@app.route("/format_text/fr", method="POST")
def formatText ():
"apply the text formatter and returns text"
| | | 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 |
except KeyError:
return '{"error" : "Unknown user."}'
return '{"message" : "Done."}'
@app.route("/format_text/fr", method="POST")
def formatText ():
"apply the text formatter and returns text"
return tf.formatText(request.forms.text)
#@app.route('/static/<filepath:path>')
#def server_static (filepath):
# return static_file(filepath, root='./views/static')
@app.route("/suggest/fr", method="POST")
def suggestPost ():
|
| ︙ | ︙ | |||
312 313 314 315 316 317 318 |
global TESTPAGE
global HOMEPAGE
if bTestPage:
TESTPAGE = True
HOMEPAGE = HOMEPAGE.replace("{SERVER_PORT}", str(nPort))
if dOptions:
| | | | | 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 |
global TESTPAGE
global HOMEPAGE
if bTestPage:
TESTPAGE = True
HOMEPAGE = HOMEPAGE.replace("{SERVER_PORT}", str(nPort))
if dOptions:
grammalecte.gc_options.setOptions(dOptions)
# Python version
print("Python: " + sys.version)
if sys.version < "3.7":
print("Python 3.7+ required")
return
# Grammalecte
echo("Grammalecte v{}".format(grammalecte.version))
grammalecte.gc_options.displayOptions()
# Process Pool Executor
initExecutor(nMultiCPU)
# Server (Bottle)
run(app, host=sHost, port=nPort)
if __name__ == '__main__':
|
| ︙ | ︙ |