@@ -127,14 +127,15 @@
     while True:
         yield str(i)
         i += 1
 
 
 def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False):
     aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions)
+    aGrammErrs = list(aGrammErrs)
     aSpellErrs = []
     for dToken in oTokenizer.genTokens(sText):
         if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
             aSpellErrs.append(dToken)
     if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
         return ""
     return " " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
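
The one functional change in this hunk is the added aGrammErrs = list(aGrammErrs). The excerpt does not show what gce.parse returns, but the fix only makes sense if the return value is something other than a plain list, e.g. a generator or another lazy iterable. Materializing it matters twice in the lines that follow: a generator object is truthy even when it yields nothing, so "not aGrammErrs" would never detect an error-free paragraph, and json.dumps refuses to serialize a generator outright. A minimal sketch of both failure modes, using a hypothetical fake_parse as a stand-in for gce.parse:

    import json

    def fake_parse(sText):
        # Hypothetical stand-in for gce.parse, assumed here to be a
        # generator of error dicts; yields nothing for clean input.
        if "erreur" in sText:
            yield {"nStart": 0, "nEnd": 6, "sType": "example"}

    gen = fake_parse("rien à signaler")

    # A generator object is always truthy, even if it yields nothing,
    # so an emptiness test on it is meaningless:
    print(not gen)                        # False, despite there being no errors

    # json.dumps cannot serialize a generator at all:
    try:
        json.dumps({"lGrammarErrors": gen})
    except TypeError as e:
        print(e)                          # Object of type generator is not JSON serializable

    # list() materializes the results, fixing both the emptiness
    # check and the JSON serialization:
    lErrs = list(fake_parse("rien à signaler"))
    print(not lErrs)                      # True: an empty list is falsy
    print(json.dumps({"lGrammarErrors": lErrs}))   # {"lGrammarErrors": []}

With a real list in hand, the bEmptyIfNoErrors short-circuit works as intended and the JSON payload serializes cleanly.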