@@ -233,14 +233,15 @@
     def __init__ (self, sText):
         self.sText = sText
         self.sText0 = sText
         self.sSentence = ""
         self.sSentence0 = ""
         self.nOffsetWithinParagraph = 0
         self.lTokens = []
+        self.lTokens0 = []
         self.dTokenPos = {}         # {position: token}
         self.dTags = {}             # {position: tags}
         self.dError = {}            # {position: error}
         self.dSentenceError = {}    # {position: error} (for the current sentence only)
         self.dErrorPriority = {}    # {position: priority of the current error}

     def __str__ (self):
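Alongside the working list lTokens, the constructor now also prepares lTokens0. This hunk does not show where the second list is filled; going by the sText/sText0 naming pattern above, it presumably keeps the original tokens reachable after the text processor has rewritten or purged entries in the working list. A runnable sketch of that idea (sample data only, not code from the diff; the keys "sNewValue" and "bToRemove" are the ones used further down in this changeset, "sValue" is assumed):

# Illustration only: a shallow copy preserves the full token sequence even
# after the working list has been rewritten and purged.
lTokens = [{"sValue": "Hello"}, {"sValue": "world"}]
lTokens0 = list(lTokens)              # shallow copy: same dicts, separate list
lTokens[0]["sNewValue"] = "Hi"        # rewriting marks tokens on the working list
del lTokens[1]                        # purging shortens the working list only
assert len(lTokens0) == 2             # the copy still lists every original token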
@@ -264,16 +265,14 @@
         dOpt = dOptions or gc_options.dOptions
         bShowRuleId = gc_options.dOptions.get('idrule', False)
         # parse paragraph
         try:
             self.parseText(self.sText, self.sText0, True, 0, sCountry, dOpt, bShowRuleId, bDebug, bContext)
         except:
             raise
-        self.lTokens = None
-        self.lTokens0 = None
         if bFullInfo:
             lParagraphErrors = list(self.dError.values())
             lSentences = []
             self.dSentenceError.clear()
         # parse sentences
         sText = self._getCleanText()
         for iStart, iEnd in text.getSentenceBoundaries(sText):
@@ -299,17 +298,16 @@
                     })
                     self.dSentenceError.clear()
             except:
                 raise
         if bFullInfo:
             # Grammar checking and sentence analysis
             return lParagraphErrors, lSentences
-        else:
-            # Grammar checking only
-            return self.dError.values() # this is a view (iterable)
+        # Grammar checking only
+        return self.dError.values() # this is a view (iterable)

     def _getCleanText (self):
         sText = self.sText
         if " " in sText:
             sText = sText.replace(" ", ' ') # nbsp
         if " " in sText:
             sText = sText.replace(" ", ' ') # nnbsp
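At the end of this hunk, _getCleanText() swaps no-break spaces for ordinary spaces before the sentence boundaries are computed; since each replacement trades one character for one character, the cleaned text presumably keeps the same length, so offsets computed on it still map back onto the original paragraph. A runnable sketch of that normalisation (sample string only):

# Sketch of the whitespace normalisation in _getCleanText() above.
sRaw = "Bonjour\u00a0!\u202fVoilà."      # U+00A0 no-break space, U+202F narrow no-break space
sClean = sRaw.replace("\u00a0", " ")     # nbsp
sClean = sClean.replace("\u202f", " ")   # nnbsp
assert len(sClean) == len(sRaw)          # one-for-one replacement keeps offsets aligned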
@@ -811,31 +809,30 @@
                 if bUppercase:
                     sWhat = sWhat[0:1].upper() + sWhat[1:]
                 self.lTokens[nTokenRewriteStart]["sNewValue"] = sWhat
             else:
                 # several tokens
                 lTokenValue = sWhat.split("|")
                 if len(lTokenValue) != (nTokenRewriteEnd - nTokenRewriteStart + 1):
-                    if (bDebug):
+                    if bDebug:
                         echo("Error. Text processor: number of replacements != number of tokens.")
                     return
                 for i, sValue in zip(range(nTokenRewriteStart, nTokenRewriteEnd+1), lTokenValue):
                     if not sValue or sValue == "*":
                         self.lTokens[i]["bToRemove"] = True
                     else:
                         if bUppercase:
                             sValue = sValue[0:1].upper() + sValue[1:]
                         self.lTokens[i]["sNewValue"] = sValue

     def rewriteFromTags (self, bDebug=False):
         "rewrite the sentence, modify tokens, purge the token list"
         if bDebug:
             echo("REWRITE")
         lNewTokens = []
-        lNewTokens0 = []
         nMergeUntil = 0
         dTokenMerger = {}
         for iToken, dToken in enumerate(self.lTokens):
             bKeepToken = True
             if dToken["sType"] != "INFO":
                 if nMergeUntil and iToken <= nMergeUntil:
                     # token to merge
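In the multi-token branch at the top of this hunk, a "|"-separated replacement string is mapped onto the matched token range: an empty piece or "*" flags a token for removal, anything else becomes the token's new value, optionally with its first letter uppercased. A standalone, runnable sketch of that mapping (function name and sample tokens are hypothetical; the logic mirrors the diff):

# Illustration of the multi-token rewrite shown above.
def rewrite_tokens (lTokens, nStart, nEnd, sWhat, bUppercase=False):
    lTokenValue = sWhat.split("|")
    if len(lTokenValue) != (nEnd - nStart + 1):
        return  # replacement count must match the number of tokens
    for i, sValue in zip(range(nStart, nEnd+1), lTokenValue):
        if not sValue or sValue == "*":
            lTokens[i]["bToRemove"] = True        # token will be purged later
        else:
            if bUppercase:
                sValue = sValue[0:1].upper() + sValue[1:]
            lTokens[i]["sNewValue"] = sValue      # token will be rewritten

lTokens = [{"sValue": "par"}, {"sValue": "contre"}]
rewrite_tokens(lTokens, 0, 1, "en revanche|*")
# lTokens[0]["sNewValue"] == "en revanche"; lTokens[1]["bToRemove"] is True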