Overview
| Comment: | [cli] suggest() in spellchecker is a generator |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | cli | multid |
| Files: | files | file ages | folders |
| SHA3-256: | 966babe645b15984223f073367f3c420 |
| User & Date: | olr on 2018-02-13 15:57:14 |
| Other Links: | branch diff | manifest | tags |
Context
2018-02-13
| 16:05 | [graphspell][py] drawPath() for spellchecker | check-in: ad736fa52b | user: olr | tags: graphspell, multid |
| 15:57 | [cli] suggest() in spellchecker is a generator | check-in: 966babe645 | user: olr | tags: cli, multid |
| 15:44 | [core][cli][server][graphspell][fx] use spellchecker instead of ibdawg | check-in: 18db5d65f0 | user: olr | tags: cli, core, server, fx, graphspell, multid |
Changes
Modified grammalecte-cli.py from [93945e25e2] to [d66b04b565].
| ︙ | ︙ |

Lines 47-53 (old) → 47-63 (new):

def _getErrors (sText, oTokenizer, oSpellChecker, bContext=False, bSpellSugg=False, bDebug=False):
    "returns a tuple: (grammar errors, spelling errors)"
    aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
            if bSpellSugg:
                dToken['aSuggestions'] = []
                for lSugg in oSpellChecker.suggest(dToken['sValue']):
                    dToken['aSuggestions'].extend(lSugg)
            aSpellErrs.append(dToken)
    return aGrammErrs, aSpellErrs


def generateText (sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, False, bSpellSugg, bDebug)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
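Since suggest() now returns a generator that yields lists of candidates in batches rather than a single flat list, a caller that wants one flat list has to merge the batches itself, which is exactly what the extend() loop in _getErrors() above does. Below is a minimal sketch of that consumption pattern; suggest_batches and its data are made up for illustration, only the flattening idiom is taken from this check-in.

import itertools

# Hypothetical stand-in for oSpellChecker.suggest(): a generator that
# yields several lists of suggestions (the data here is invented).
def suggest_batches(sWord):
    yield ["exemple", "exempte"]
    yield ["exemplaire"]

# Flatten the batches the same way _getErrors() does above.
lAll = []
for lSugg in suggest_batches("exenple"):
    lAll.extend(lSugg)

# Equivalent one-liner with itertools.chain.
assert lAll == list(itertools.chain.from_iterable(suggest_batches("exenple")))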
| ︙ | ︙ |

Lines 140-146 (old) → 142-161 (new):

if xArgs.list_options:
    gce.displayOptions("fr")
if xArgs.list_rules:
    gce.displayRules(None if xArgs.list_rules == "*" else xArgs.list_rules)
exit()

if xArgs.suggest:
    for lSugg in oSpellChecker.suggest(xArgs.suggest):
        if xArgs.json:
            sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
        else:
            sText = "Suggestions : " + " | ".join(lSugg)
        echo(sText)
    exit()

if not xArgs.json:
    xArgs.context = False

gce.setOptions({"html": True, "latex": True})
if xArgs.opt_on:
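Because the loop above iterates over the generator, the xArgs.suggest path now emits one echo() call per yielded batch: one "Suggestions :" line per batch, or one JSON object per batch when xArgs.json is set. If a single combined JSON document were preferred, the batches could be merged first. A sketch of that alternative, assuming only the suggest()-as-generator behaviour shown in this diff; suggestions_as_json is a made-up helper, not part of the CLI:

import itertools
import json

def suggestions_as_json(oSpellChecker, sWord):
    "Sketch: merge every batch yielded by suggest() into one JSON document."
    lAll = list(itertools.chain.from_iterable(oSpellChecker.suggest(sWord)))
    return json.dumps({ "aSuggestions": lAll }, ensure_ascii=False)

This keeps the same { "aSuggestions": [...] } shape as the per-batch output while producing a single object for the whole word.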
| ︙ | ︙ |

Lines 218-224 (old) → 220-235 (new):

        if sWord:
            echo("* " + sWord)
            for sMorph in oSpellChecker.getMorph(sWord):
                echo(" {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))
elif sText.startswith("!"):
    for sWord in sText[1:].strip().split():
        if sWord:
            for lSugg in oSpellChecker.suggest(sWord):
                echo(" | ".join(lSugg))
elif sText.startswith(">"):
    oSpellChecker.drawPath(sText[1:].strip())
elif sText.startswith("="):
    for sRes in oSpellChecker.select(sText[1:].strip()):
        echo(sRes)
elif sText.startswith("/+ "):
    gce.setOptions({ opt:True for opt in sText[3:].strip().split() if opt in gce.getOptions() })
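The interactive "!" command above likewise prints each yielded batch on its own line. For intuition, here is a rough sketch of what a generator-style suggest() could look like on the producer side, assuming it simply walks a set of sub-dictionaries and yields each one's candidate list. The classes, their fields, and the naive prefix matching are all illustrative and not taken from graphspell or this check-in.

class _FakeDictionary:
    "Illustrative stand-in for a dictionary object; naive prefix matching only."
    def __init__(self, lWords):
        self.lWords = lWords

    def suggest(self, sWord):
        return [w for w in self.lWords if w[:2] == sWord[:2]]


class SpellCheckerSketch:
    "Illustrative only: suggest() is a generator yielding one list per sub-dictionary."
    def __init__(self, lDictionaries):
        self.lDictionaries = lDictionaries

    def suggest(self, sWord):
        for oDict in self.lDictionaries:
            yield oDict.suggest(sWord)


oSketch = SpellCheckerSketch([_FakeDictionary(["exemple", "essai"]), _FakeDictionary(["exempt"])])
for lSugg in oSketch.suggest("exenple"):
    print(" | ".join(lSugg))    # one line per batch, as the "!" command does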
| ︙ | ︙ |