1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
|
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
|
+
+
-
-
+
+
+
-
-
-
+
+
+
-
+
+
+
+
|
"""
Grammalecte
Grammar checker engine
"""
import re
import traceback
import json
import importlib
#import unicodedata
from itertools import chain
from ..graphspell.spellchecker import SpellChecker
from ..graphspell.echo import echo
from .. import text
from . import gc_options
from . import gc_engine_func as gce_func
from . import gc_engine_func as gce_func
from . import gc_options
try:
# LibreOffice / OpenOffice
from com.sun.star.linguistic2 import SingleProofreadingError
from com.sun.star.text.TextMarkupType import PROOFREADING
from com.sun.star.beans import PropertyValue
#import lightproof_handler_${implname} as opt
_bWriterError = True
except ImportError:
_bWriterError = False
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "getSpellChecker", \
"ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules", "setWriterUnderliningStyle" ]
#__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
# "load", "parse", "getSpellChecker", "getTextFormatter", "getLexicographer" \
# "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules", "setWriterUnderliningStyle" ]
__version__ = "${version}"
lang = "${lang}"
locales = ${loc}
pkg = "${implname}"
name = "${name}"
version = "${version}"
author = "${author}"
# Modules
_rules = None # module gc_rules
_rules_graph = None # module gc_rules_graph
# Data
# Tools
_oSpellChecker = None
_oTokenizer = None
_oLexicographer = None
# Data
_aIgnoredRules = set()
# Writer underlining style
_dOptionsColors = None
_bMulticolor = True
_nUnderliningStyle = 0
|
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
|
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
|
-
+
|
def reactivateRule (sRuleId):
    "(re)activate rule <sRuleId>: remove it from the set of ignored rules (no-op if not ignored)"
    if sRuleId in _aIgnoredRules:
        _aIgnoredRules.remove(sRuleId)
def listRules (sFilter=None):
"generator: returns typle (sOption, sLineId, sRuleId)"
"generator: returns tuple (sRuleType, sOption, sLineId, sRuleId)"
if sFilter:
try:
zFilter = re.compile(sFilter)
except re.error:
echo("# Error. List rules: wrong regex.")
sFilter = None
# regex rules
|
170
171
172
173
174
175
176
177
178
179
180
181
182
183
|
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
|
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
|
_nUnderliningStyle = 5
else:
_nUnderliningStyle = 0
_bMulticolor = bMulticolor
#### Parsing
def getParagraphErrors (sText, dOptions=None, bContext=False, bSpellSugg=False, bDebug=False):
    """returns a tuple: (grammar errors, spelling errors)
    <sText>: paragraph to check
    <dOptions>: grammar-checker options (None = current options)
    <bContext>: include context in grammar errors
    <bSpellSugg>: include suggestions in spelling errors
    """
    # Fix: use the build-time country placeholder instead of a hard-coded "FR",
    # consistent with parse()'s own default — a non-FR build generated from this
    # template would otherwise always check with the French country code.
    aGrammErrs = parse(sText, "${country_default}", bDebug=bDebug, dOptions=dOptions, bContext=bContext)
    aSpellErrs = _oSpellChecker.parseParagraph(sText, bSpellSugg)
    return aGrammErrs, aSpellErrs
def getParagraphWithErrors (sText, dOptions=None, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100, bDebug=False):
    "parse <sText> and return a readable text with the errors underlined"
    lGramm, lSpell = getParagraphErrors(sText, dOptions, False, bSpellSugg, bDebug)
    bClean = not lGramm and not lSpell
    if bEmptyIfNoErrors and bClean:
        # caller asked for an empty result when the paragraph has no error
        return ("", [])
    return text.generateParagraph(sText, lGramm, lSpell, nWidth)
def getParagraphErrorsAsJSON (iIndex, sText, dOptions=None, bContext=False, bEmptyIfNoErrors=False, bSpellSugg=False, bReturnText=False, lLineSet=None, bDebug=False):
    "parse <sText> and return the errors found as a JSON string"
    lGramm, lSpell = getParagraphErrors(sText, dOptions, bContext, bSpellSugg, bDebug)
    lGramm = list(lGramm)   # materialise (grammar errors may arrive as an iterable)
    if bEmptyIfNoErrors and not lGramm and not lSpell:
        return ""
    if lLineSet:
        # errors remapped to (line, column) coordinates: no paragraph index in the payload
        lGramm, lSpell = text.convertToXY(lGramm, lSpell, lLineSet)
        dPayload = { "lGrammarErrors": lGramm, "lSpellingErrors": lSpell }
    elif bReturnText:
        dPayload = { "iParagraph": iIndex, "sText": sText, "lGrammarErrors": lGramm, "lSpellingErrors": lSpell }
    else:
        dPayload = { "iParagraph": iIndex, "lGrammarErrors": lGramm, "lSpellingErrors": lSpell }
    return json.dumps(dPayload, ensure_ascii=False)
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False, bFullInfo=False):
    "entry point: analyse <sText>; returns an iterable of errors, or — with <bFullInfo> — paragraph errors and sentences with their tokens and errors"
    return TextParser(sText).parse(sCountry, bDebug, dOptions, bContext, bFullInfo)
|