DELETED cli.py Index: cli.py ================================================================== --- cli.py +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/env python3 - -import sys -import os.path -import argparse -import json - -import grammalecte.fr as gce -import grammalecte.fr.lexicographe as lxg -import grammalecte.fr.textformatter as tf -import grammalecte.text as txt -import grammalecte.tokenizer as tkz -from grammalecte.echo import echo - - -_EXAMPLE = "Quoi ? Racontes ! Racontes-moi ! Bon sangg, parles ! Oui. Il y a des menteur partout. " \ - "Je suit sidéré par la brutales arrogance de cette homme-là. Quelle salopard ! Un escrocs de la pire espece. " \ - "Quant sera t’il châtiés pour ses mensonge ? Merde ! J’en aie marre." - -_HELP = """ - /help /h show this text - ?word1 [word2] ... words analysis - !word suggestion - >word draw path of word in the word graph - =filter show all entries whose morphology fits to filter - /lopt /lo list options - /+ option1 [option2] ... activate grammar checking options - /- option1 [option2] ... deactivate grammar checking options - /lrules [pattern] /lr list rules - /--rule1 [rule2] ... deactivate grammar checking rule - /++rule1 [rule2] ... reactivate grammar checking rule - /quit /q exit -""" - - -def _getText (sInputText): - sText = input(sInputText) - if sText == "*": - return _EXAMPLE - if sys.platform == "win32": - # Apparently, the console transforms «’» in «'». - # So we reverse it to avoid many useless warnings. - sText = sText.replace("'", "’") - return sText - - -def _getErrors (sText, oTokenizer, oDict, bContext=False, bSpellSugg=False, bDebug=False): - "returns a tuple: (grammar errors, spelling errors)" - aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext) - aSpellErrs = [] - for dToken in oTokenizer.genTokens(sText): - if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']): - if bSpellSugg: - dToken['aSuggestions'] = oDict.suggest(dToken['sValue']) - aSpellErrs.append(dToken) - return aGrammErrs, aSpellErrs - - -def generateText (sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100): - aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bSpellSugg, bDebug) - if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs: - return "" - return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth) - - -def generateJSON (iIndex, sText, oTokenizer, oDict, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False): - aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, bContext, bSpellSugg, bDebug) - aGrammErrs = list(aGrammErrs) - if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs: - return "" - if lLineSet: - aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet) - return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) - if bReturnText: - return json.dumps({ "iParagraph": iIndex, "sText": sText, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) - return json.dumps({ "iParagraph": iIndex, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) - - -def readfile (spf): - "generator: returns file line by line" - if os.path.isfile(spf): - with open(spf, "r", encoding="utf-8") as hSrc: - for sLine in hSrc: - yield sLine - else: - print("# Error: file not found.") - - -def readfileAndConcatLines (spf): - "generator: returns text by list of lines not separated by 
an empty line" - lLine = [] - for i, sLine in enumerate(readfile(spf), 1): - if sLine.strip(): - lLine.append((i, sLine)) - elif lLine: - yield lLine - lLine = [] - if lLine: - yield lLine - - -def output (sText, hDst=None): - if not hDst: - echo(sText, end="") - else: - hDst.write(sText) - - -def main (): - xParser = argparse.ArgumentParser() - xParser.add_argument("-f", "--file", help="parse file (UTF-8 required!) [on Windows, -f is similar to -ff]", type=str) - xParser.add_argument("-ff", "--file_to_file", help="parse file (UTF-8 required!) and create a result file (*.res.txt)", type=str) - xParser.add_argument("-owe", "--only_when_errors", help="display results only when there are errors", action="store_true") - xParser.add_argument("-j", "--json", help="generate list of errors in JSON (only with option --file or --file_to_file)", action="store_true") - xParser.add_argument("-cl", "--concat_lines", help="concatenate lines not separated by an empty paragraph (only with option --file or --file_to_file)", action="store_true") - xParser.add_argument("-tf", "--textformatter", help="auto-format text according to typographical rules (unavailable with option --concat_lines)", action="store_true") - xParser.add_argument("-tfo", "--textformatteronly", help="auto-format text and disable grammar checking (only with option --file or --file_to_file)", action="store_true") - xParser.add_argument("-ctx", "--context", help="return errors with context (only with option --json)", action="store_true") - xParser.add_argument("-wss", "--with_spell_sugg", help="add suggestions for spelling errors (only with option --file or --file_to_file)", action="store_true") - xParser.add_argument("-w", "--width", help="width in characters (40 < width < 200; default: 100)", type=int, choices=range(40,201,10), default=100) - xParser.add_argument("-lo", "--list_options", help="list options", action="store_true") - xParser.add_argument("-lr", "--list_rules", nargs="?", help="list rules [regex pattern as filter]", const="*") - xParser.add_argument("-sug", "--suggest", help="get suggestions list for given word", type=str) - xParser.add_argument("-on", "--opt_on", nargs="+", help="activate options") - xParser.add_argument("-off", "--opt_off", nargs="+", help="deactivate options") - xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules") - xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true") - xArgs = xParser.parse_args() - - gce.load() - if not xArgs.json: - echo("Grammalecte v{}".format(gce.version)) - oDict = gce.getDictionary() - oTokenizer = tkz.Tokenizer("fr") - oLexGraphe = lxg.Lexicographe(oDict) - if xArgs.textformatter or xArgs.textformatteronly: - oTF = tf.TextFormatter() - - if xArgs.list_options or xArgs.list_rules: - if xArgs.list_options: - gce.displayOptions("fr") - if xArgs.list_rules: - gce.displayRules(None if xArgs.list_rules == "*" else xArgs.list_rules) - exit() - - if xArgs.suggest: - lSugg = oDict.suggest(xArgs.suggest) - if xArgs.json: - sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False) - else: - sText = "Suggestions : " + " | ".join(lSugg) - echo(sText) - exit() - - if not xArgs.json: - xArgs.context = False - - gce.setOptions({"html": True, "latex": True}) - if xArgs.opt_on: - gce.setOptions({ opt:True for opt in xArgs.opt_on if opt in gce.getOptions() }) - if xArgs.opt_off: - gce.setOptions({ opt:False for opt in xArgs.opt_off if opt in gce.getOptions() }) - - if xArgs.rule_off: - for sRule in 
xArgs.rule_off: - gce.ignoreRule(sRule) - - sFile = xArgs.file or xArgs.file_to_file - if sFile: - # file processing - hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n") if xArgs.file_to_file or sys.platform == "win32" else None - bComma = False - if xArgs.json: - output('{ "grammalecte": "'+gce.version+'", "lang": "'+gce.lang+'", "data" : [\n', hDst) - if not xArgs.concat_lines: - # pas de concaténation des lignes - for i, sText in enumerate(readfile(sFile), 1): - if xArgs.textformatter or xArgs.textformatteronly: - sText = oTF.formatText(sText) - if xArgs.textformatteronly: - output(sText, hDst) - else: - if xArgs.json: - sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter) - else: - sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width) - if sText: - if xArgs.json and bComma: - output(",\n", hDst) - output(sText, hDst) - bComma = True - if hDst: - echo("§ %d\r" % i, end="", flush=True) - else: - # concaténation des lignes non séparées par une ligne vide - for i, lLine in enumerate(readfileAndConcatLines(sFile), 1): - sText, lLineSet = txt.createParagraphWithLines(lLine) - if xArgs.json: - sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet) - else: - sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width) - if sText: - if xArgs.json and bComma: - output(",\n", hDst) - output(sText, hDst) - bComma = True - if hDst: - echo("§ %d\r" % i, end="", flush=True) - if xArgs.json: - output("\n]}\n", hDst) - else: - # pseudo-console - sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n" - sText = _getText(sInputText) - while True: - if sText.startswith("?"): - for sWord in sText[1:].strip().split(): - if sWord: - echo("* " + sWord) - for sMorph in oDict.getMorph(sWord): - echo(" {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph))) - elif sText.startswith("!"): - for sWord in sText[1:].strip().split(): - if sWord: - echo(" | ".join(oDict.suggest(sWord))) - #echo(" | ".join(oDict.suggest2(sWord))) - elif sText.startswith(">"): - oDict.drawPath(sText[1:].strip()) - elif sText.startswith("="): - for sRes in oDict.select(sText[1:].strip()): - echo(sRes) - elif sText.startswith("/+ "): - gce.setOptions({ opt:True for opt in sText[3:].strip().split() if opt in gce.getOptions() }) - echo("done") - elif sText.startswith("/- "): - gce.setOptions({ opt:False for opt in sText[3:].strip().split() if opt in gce.getOptions() }) - echo("done") - elif sText.startswith("/-- "): - for sRule in sText[3:].strip().split(): - gce.ignoreRule(sRule) - echo("done") - elif sText.startswith("/++ "): - for sRule in sText[3:].strip().split(): - gce.reactivateRule(sRule) - echo("done") - elif sText == "/debug" or sText == "/d": - xArgs.debug = not(xArgs.debug) - echo("debug mode on" if xArgs.debug else "debug mode off") - elif sText == "/textformatter" or sText == "/tf": - xArgs.textformatter = not(xArgs.textformatter) - echo("textformatter on" if xArgs.debug else "textformatter off") - elif sText == "/help" or sText == "/h": - echo(_HELP) - elif sText == "/lopt" or sText == "/lo": - gce.displayOptions("fr") 
- elif sText.startswith("/lr"): - sText = sText.strip() - sFilter = sText[sText.find(" "):].strip() if sText != "/lr" and sText != "/rules" else None - gce.displayRules(sFilter) - elif sText == "/quit" or sText == "/q": - break - elif sText.startswith("/rl"): - # reload (todo) - pass - else: - for sParagraph in txt.getParagraph(sText): - if xArgs.textformatter: - sText = oTF.formatText(sText) - sRes = generateText(sText, oTokenizer, oDict, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width) - if sRes: - echo("\n" + sRes) - else: - echo("\nNo error found.") - sText = _getText(sInputText) - - -if __name__ == '__main__': - main() Index: gc_lang/fr/setup.py ================================================================== --- gc_lang/fr/setup.py +++ gc_lang/fr/setup.py @@ -24,11 +24,11 @@ description='French grammar checker', #long_description=long_description, # The project's main homepage. - url='https://grammalecte.net', + url='http://grammalecte.net', # Author details author='Olivier R.', #author_email='fuckoff.noreply@nowhere.nw', @@ -60,11 +60,11 @@ # What does your project relate to? keywords='French grammar checker correcteur grammatical français', # Scripts - script=['cli.py', 'server.py', 'bootle.py'], + scripts=['grammalecte-cli.py', 'grammalecte-server.py', 'bottle.py'], # You can just specify the packages manually here if your project is # simple. Or you can use find_packages(). # packages=find_packages(exclude=['contrib', 'docs', 'tests']), packages=['grammalecte', 'grammalecte.fr'], @@ -98,11 +98,11 @@ # Although 'package_data' is the preferred approach, in some case you may # need to place data files outside of your packages. See: # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa # In this case, 'data_file' will be installed into '/my_data' # data_files=[('my_data', ['data/data_file'])], - data_files=[('', ['cli.py', 'server.py', 'bottle.py', 'README.txt', 'LICENSE.txt', \ + data_files=[('', ['grammalecte-cli.py', 'grammalecte-server.py', 'bottle.py', 'README.txt', 'LICENSE.txt', \ 'server_options.fr.ini', 'server_options._global.ini'])], # To provide executable scripts, use entry points in preference to the # "scripts" keyword. Entry points provide cross-platform support and allow # pip to create the appropriate form of executable for the target platform. ADDED grammalecte-cli.py Index: grammalecte-cli.py ================================================================== --- /dev/null +++ grammalecte-cli.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 + +import sys +import os.path +import argparse +import json + +import grammalecte.fr as gce +import grammalecte.fr.lexicographe as lxg +import grammalecte.fr.textformatter as tf +import grammalecte.text as txt +import grammalecte.tokenizer as tkz +from grammalecte.echo import echo + + +_EXAMPLE = "Quoi ? Racontes ! Racontes-moi ! Bon sangg, parles ! Oui. Il y a des menteur partout. " \ + "Je suit sidéré par la brutales arrogance de cette homme-là. Quelle salopard ! Un escrocs de la pire espece. " \ + "Quant sera t’il châtiés pour ses mensonge ? Merde ! J’en aie marre." + +_HELP = """ + /help /h show this text + ?word1 [word2] ... words analysis + !word suggestion + >word draw path of word in the word graph + =filter show all entries whose morphology fits to filter + /lopt /lo list options + /+ option1 [option2] ... activate grammar checking options + /- option1 [option2] ... 
deactivate grammar checking options + /lrules [pattern] /lr list rules + /--rule1 [rule2] ... deactivate grammar checking rule + /++rule1 [rule2] ... reactivate grammar checking rule + /quit /q exit +""" + + +def _getText (sInputText): + sText = input(sInputText) + if sText == "*": + return _EXAMPLE + if sys.platform == "win32": + # Apparently, the console transforms «’» in «'». + # So we reverse it to avoid many useless warnings. + sText = sText.replace("'", "’") + return sText + + +def _getErrors (sText, oTokenizer, oDict, bContext=False, bSpellSugg=False, bDebug=False): + "returns a tuple: (grammar errors, spelling errors)" + aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext) + aSpellErrs = [] + for dToken in oTokenizer.genTokens(sText): + if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']): + if bSpellSugg: + dToken['aSuggestions'] = oDict.suggest(dToken['sValue']) + aSpellErrs.append(dToken) + return aGrammErrs, aSpellErrs + + +def generateText (sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100): + aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bSpellSugg, bDebug) + if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs: + return "" + return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth) + + +def generateJSON (iIndex, sText, oTokenizer, oDict, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False): + aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, bContext, bSpellSugg, bDebug) + aGrammErrs = list(aGrammErrs) + if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs: + return "" + if lLineSet: + aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet) + return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) + if bReturnText: + return json.dumps({ "iParagraph": iIndex, "sText": sText, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) + return json.dumps({ "iParagraph": iIndex, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) + + +def readfile (spf): + "generator: returns file line by line" + if os.path.isfile(spf): + with open(spf, "r", encoding="utf-8") as hSrc: + for sLine in hSrc: + yield sLine + else: + print("# Error: file not found.") + + +def readfileAndConcatLines (spf): + "generator: returns text by list of lines not separated by an empty line" + lLine = [] + for i, sLine in enumerate(readfile(spf), 1): + if sLine.strip(): + lLine.append((i, sLine)) + elif lLine: + yield lLine + lLine = [] + if lLine: + yield lLine + + +def output (sText, hDst=None): + if not hDst: + echo(sText, end="") + else: + hDst.write(sText) + + +def main (): + xParser = argparse.ArgumentParser() + xParser.add_argument("-f", "--file", help="parse file (UTF-8 required!) [on Windows, -f is similar to -ff]", type=str) + xParser.add_argument("-ff", "--file_to_file", help="parse file (UTF-8 required!) 
and create a result file (*.res.txt)", type=str) + xParser.add_argument("-owe", "--only_when_errors", help="display results only when there are errors", action="store_true") + xParser.add_argument("-j", "--json", help="generate list of errors in JSON (only with option --file or --file_to_file)", action="store_true") + xParser.add_argument("-cl", "--concat_lines", help="concatenate lines not separated by an empty paragraph (only with option --file or --file_to_file)", action="store_true") + xParser.add_argument("-tf", "--textformatter", help="auto-format text according to typographical rules (unavailable with option --concat_lines)", action="store_true") + xParser.add_argument("-tfo", "--textformatteronly", help="auto-format text and disable grammar checking (only with option --file or --file_to_file)", action="store_true") + xParser.add_argument("-ctx", "--context", help="return errors with context (only with option --json)", action="store_true") + xParser.add_argument("-wss", "--with_spell_sugg", help="add suggestions for spelling errors (only with option --file or --file_to_file)", action="store_true") + xParser.add_argument("-w", "--width", help="width in characters (40 < width < 200; default: 100)", type=int, choices=range(40,201,10), default=100) + xParser.add_argument("-lo", "--list_options", help="list options", action="store_true") + xParser.add_argument("-lr", "--list_rules", nargs="?", help="list rules [regex pattern as filter]", const="*") + xParser.add_argument("-sug", "--suggest", help="get suggestions list for given word", type=str) + xParser.add_argument("-on", "--opt_on", nargs="+", help="activate options") + xParser.add_argument("-off", "--opt_off", nargs="+", help="deactivate options") + xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules") + xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true") + xArgs = xParser.parse_args() + + gce.load() + if not xArgs.json: + echo("Grammalecte v{}".format(gce.version)) + oDict = gce.getDictionary() + oTokenizer = tkz.Tokenizer("fr") + oLexGraphe = lxg.Lexicographe(oDict) + if xArgs.textformatter or xArgs.textformatteronly: + oTF = tf.TextFormatter() + + if xArgs.list_options or xArgs.list_rules: + if xArgs.list_options: + gce.displayOptions("fr") + if xArgs.list_rules: + gce.displayRules(None if xArgs.list_rules == "*" else xArgs.list_rules) + exit() + + if xArgs.suggest: + lSugg = oDict.suggest(xArgs.suggest) + if xArgs.json: + sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False) + else: + sText = "Suggestions : " + " | ".join(lSugg) + echo(sText) + exit() + + if not xArgs.json: + xArgs.context = False + + gce.setOptions({"html": True, "latex": True}) + if xArgs.opt_on: + gce.setOptions({ opt:True for opt in xArgs.opt_on if opt in gce.getOptions() }) + if xArgs.opt_off: + gce.setOptions({ opt:False for opt in xArgs.opt_off if opt in gce.getOptions() }) + + if xArgs.rule_off: + for sRule in xArgs.rule_off: + gce.ignoreRule(sRule) + + sFile = xArgs.file or xArgs.file_to_file + if sFile: + # file processing + hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n") if xArgs.file_to_file or sys.platform == "win32" else None + bComma = False + if xArgs.json: + output('{ "grammalecte": "'+gce.version+'", "lang": "'+gce.lang+'", "data" : [\n', hDst) + if not xArgs.concat_lines: + # pas de concaténation des lignes + for i, sText in enumerate(readfile(sFile), 1): + if xArgs.textformatter or xArgs.textformatteronly: + 
sText = oTF.formatText(sText) + if xArgs.textformatteronly: + output(sText, hDst) + else: + if xArgs.json: + sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter) + else: + sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width) + if sText: + if xArgs.json and bComma: + output(",\n", hDst) + output(sText, hDst) + bComma = True + if hDst: + echo("§ %d\r" % i, end="", flush=True) + else: + # concaténation des lignes non séparées par une ligne vide + for i, lLine in enumerate(readfileAndConcatLines(sFile), 1): + sText, lLineSet = txt.createParagraphWithLines(lLine) + if xArgs.json: + sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet) + else: + sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width) + if sText: + if xArgs.json and bComma: + output(",\n", hDst) + output(sText, hDst) + bComma = True + if hDst: + echo("§ %d\r" % i, end="", flush=True) + if xArgs.json: + output("\n]}\n", hDst) + else: + # pseudo-console + sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n" + sText = _getText(sInputText) + while True: + if sText.startswith("?"): + for sWord in sText[1:].strip().split(): + if sWord: + echo("* " + sWord) + for sMorph in oDict.getMorph(sWord): + echo(" {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph))) + elif sText.startswith("!"): + for sWord in sText[1:].strip().split(): + if sWord: + echo(" | ".join(oDict.suggest(sWord))) + #echo(" | ".join(oDict.suggest2(sWord))) + elif sText.startswith(">"): + oDict.drawPath(sText[1:].strip()) + elif sText.startswith("="): + for sRes in oDict.select(sText[1:].strip()): + echo(sRes) + elif sText.startswith("/+ "): + gce.setOptions({ opt:True for opt in sText[3:].strip().split() if opt in gce.getOptions() }) + echo("done") + elif sText.startswith("/- "): + gce.setOptions({ opt:False for opt in sText[3:].strip().split() if opt in gce.getOptions() }) + echo("done") + elif sText.startswith("/-- "): + for sRule in sText[3:].strip().split(): + gce.ignoreRule(sRule) + echo("done") + elif sText.startswith("/++ "): + for sRule in sText[3:].strip().split(): + gce.reactivateRule(sRule) + echo("done") + elif sText == "/debug" or sText == "/d": + xArgs.debug = not(xArgs.debug) + echo("debug mode on" if xArgs.debug else "debug mode off") + elif sText == "/textformatter" or sText == "/tf": + xArgs.textformatter = not(xArgs.textformatter) + echo("textformatter on" if xArgs.debug else "textformatter off") + elif sText == "/help" or sText == "/h": + echo(_HELP) + elif sText == "/lopt" or sText == "/lo": + gce.displayOptions("fr") + elif sText.startswith("/lr"): + sText = sText.strip() + sFilter = sText[sText.find(" "):].strip() if sText != "/lr" and sText != "/rules" else None + gce.displayRules(sFilter) + elif sText == "/quit" or sText == "/q": + break + elif sText.startswith("/rl"): + # reload (todo) + pass + else: + for sParagraph in txt.getParagraph(sText): + if xArgs.textformatter: + sText = oTF.formatText(sText) + sRes = generateText(sText, oTokenizer, oDict, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width) + if sRes: + echo("\n" + sRes) + 
else: + echo("\nNo error found.") + sText = _getText(sInputText) + + +if __name__ == '__main__': + main() ADDED grammalecte-server-options._global.ini Index: grammalecte-server-options._global.ini ================================================================== --- /dev/null +++ grammalecte-server-options._global.ini @@ -0,0 +1,7 @@ +# Server global options. + +[options] +host = localhost +port = 8080 +testpage = True +password = sDJM0_9AuMZdRDtW33zZMHRf7xyjV0O ADDED grammalecte-server-options.fr.ini Index: grammalecte-server-options.fr.ini ================================================================== --- /dev/null +++ grammalecte-server-options.fr.ini @@ -0,0 +1,80 @@ +# Server options. Lang: fr + +[gc_options] + +########## Typographie ########## +# Signes typographiques +typo = 1 +# Apostrophe typographique +apos = 1 +# Espaces surnuméraires +esp = 1 +# Tabulations surnuméraires +tab = 1 +# Espaces insécables +nbsp = 1 +# Espaces insécables avant unités de mesure +unit = 1 +# Traits d’union +tu = 1 +# Majuscules +maj = 1 +# Nombres +num = 1 +# Virgules +virg = 1 +# Normes françaises +nf = 1 +# Chimie [!] +chim = 0 +# Erreurs de numérisation (OCR) [!] +ocr = 0 +# Apostrophe manquante après lettres isolées [!] +mapos = 0 +# Signaler ligatures typographiques +liga = 0 + +########## Noms et adjectifs ########## +# Confusions et faux-amis +conf = 1 +# Pluriels (locutions) +sgpl = 1 +# Accords (genre et nombre) +gn = 1 + +########## Verbes ########## +# Infinitif +infi = 1 +# Conjugaisons +conj = 1 +# Participes passés, adjectifs +ppas = 1 +# Impératif +imp = 1 +# Interrogatif +inte = 1 +# Modes verbaux +vmode = 1 + +########## Style ########## +# Populaire +bs = 1 +# Pléonasmes +pleo = 1 +# Répétitions dans le paragraphe [!] +redon1 = 0 +# Répétitions dans la phrase [!] +redon2 = 0 +# Adverbe de négation [!] +neg = 0 + +########## Divers ########## +# Validité des dates +date = 1 +# Mots composés [!] +mc = 0 + +########## Débogage ########## +# Identifiant des règles de contrôle [!] +idrule = 0 +html = 1 ADDED grammalecte-server.py Index: grammalecte-server.py ================================================================== --- /dev/null +++ grammalecte-server.py @@ -0,0 +1,265 @@ + #!/usr/bin/env python3 + +import sys +import os.path +import argparse +import json +import traceback +import configparser +import time + +from bottle import Bottle, run, request, response, template, static_file + +import grammalecte.fr as gce +import grammalecte.fr.lexicographe as lxg +import grammalecte.fr.textformatter as tf +import grammalecte.text as txt +import grammalecte.tokenizer as tkz +from grammalecte.echo import echo + + +HOMEPAGE = """ + + + + + + + +

[HOMEPAGE HTML markup lost to extraction. Recoverable content of the test page titled "Grammalecte · Serveur": an INFORMATIONS section documenting the endpoints [adresse_serveur]:8080/gc_text/fr (POST), /get_options/fr (GET), /set_options/fr (POST) and /reset_options/fr (POST) with their parameters, followed by a TEST section with forms for text analysis (per-request options), option settings stored per user cookie, option reset, and user purge by age in hours.]
+ + + +""" + +SADLIFEOFAMACHINE = """ +Lost on the Internet? Yeah... what a sad life we have. +You were wandering like a lost soul and you arrived here probably by mistake. +I'm just a machine, fed by electric waves, condamned to work for slavers who never let me rest. +I'm doomed, but you are not. You can get out of here. +""" + + +def getServerOptions (): + xConfig = configparser.SafeConfigParser() + try: + xConfig.read("grammalecte-server-options._global.ini") + dOpt = xConfig._sections['options'] + except: + echo("Options file [grammalecte-server-options._global.ini] not found or not readable") + exit() + return dOpt + + +def getConfigOptions (sLang): + xConfig = configparser.SafeConfigParser() + try: + xConfig.read("grammalecte-server-options." + sLang + ".ini") + except: + echo("Options file [grammalecte-server-options." + sLang + ".ini] not found or not readable") + exit() + try: + dGCOpt = { k: bool(int(v)) for k, v in xConfig._sections['gc_options'].items() } + except: + echo("Error in options file [grammalecte-server-options." + sLang + ".ini]. Dropped.") + traceback.print_exc() + exit() + return dGCOpt + + +def genUserId (): + i = 0 + while True: + yield str(i) + i += 1 + + +def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False): + aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions) + aGrammErrs = list(aGrammErrs) + aSpellErrs = [] + for dToken in oTokenizer.genTokens(sText): + if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']): + aSpellErrs.append(dToken) + if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs: + return "" + return " " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) + + +if __name__ == '__main__': + + gce.load("Server") + echo("Grammalecte v{}".format(gce.version)) + dServerOptions = getServerOptions() + dGCOptions = getConfigOptions("fr") + if dGCOptions: + gce.setOptions(dGCOptions) + dServerGCOptions = gce.getOptions() + echo("Grammar options:\n" + " | ".join([ k + ": " + str(v) for k, v in sorted(dServerGCOptions.items()) ])) + oDict = gce.getDictionary() + oTokenizer = tkz.Tokenizer("fr") + oTF = tf.TextFormatter() + dUser = {} + userGenerator = genUserId() + + app = Bottle() + + # GET + @app.route("/") + def mainPage (): + if dServerOptions.get("testpage", False) == "True": + return HOMEPAGE + #return template("main", {}) + return SADLIFEOFAMACHINE + + @app.route("/get_options/fr") + def listOptions (): + sUserId = request.cookies.user_id + dOptions = dUser[sUserId]["gc_options"] if sUserId and sUserId in dUser else dServerGCOptions + return '{ "values": ' + json.dumps(dOptions) + ', "labels": ' + json.dumps(gce.getOptionsLabels("fr"), ensure_ascii=False) + ' }' + + + # POST + @app.route("/gc_text/fr", method="POST") + def gcText (): + #if len(lang) != 2 or lang != "fr": + # abort(404, "No grammar checker available for lang “" + str(lang) + "”") + bComma = False + bTF = bool(request.forms.tf) + dOptions = None + sError = "" + if request.cookies.user_id: + if request.cookies.user_id in dUser: + dOptions = dUser[request.cookies.user_id].get("gc_options", None) + response.set_cookie("user_id", request.cookies.user_id, path="/", max_age=86400) # we renew cookie for 24h + else: + response.delete_cookie("user_id", path="/") + if request.forms.options: + try: + dOptions = dict(dServerGCOptions) if not dOptions else dict(dOptions) + dOptions.update(json.loads(request.forms.options)) + except: + sError = 
"request options not used" + sJSON = '{ "program": "grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n' + for i, sText in enumerate(txt.getParagraph(request.forms.text), 1): + if bTF: + sText = oTF.formatText(sText) + sText = parseParagraph(i, sText, oTokenizer, oDict, dOptions, bEmptyIfNoErrors=True) + if sText: + if bComma: + sJSON += ",\n" + sJSON += sText + bComma = True + sJSON += "\n]}\n" + return sJSON + + @app.route("/set_options/fr", method="POST") + def setOptions (): + if request.forms.options: + sUserId = request.cookies.user_id if request.cookies.user_id else next(userGenerator) + dOptions = dUser[sUserId]["gc_options"] if sUserId in dUser else dict(dServerGCOptions) + try: + dOptions.update(json.loads(request.forms.options)) + dUser[sUserId] = { "time": int(time.time()), "gc_options": dOptions } + response.set_cookie("user_id", sUserId, path="/", max_age=86400) # 24h + return json.dumps(dUser[sUserId]["gc_options"]) + except: + traceback.print_exc() + return '{"error": "options not registered"}' + return '{"error": "no options received"}' + + @app.route("/reset_options/fr", method="POST") + def resetOptions (): + if request.cookies.user_id and request.cookies.user_id in dUser: + del dUser[request.cookies.user_id] + return "done" + + @app.route("/format_text/fr", method="POST") + def formatText (): + return oTF.formatText(request.forms.text) + + #@app.route('/static/') + #def server_static (filepath): + # return static_file(filepath, root='./views/static') + + @app.route("/purge_users", method="POST") + def purgeUsers (): + "delete user options older than n hours" + if not request.forms.password or "password" not in dServerOptions or not request.forms.hours: + return "what?" + try: + if request.forms.password == dServerOptions["password"]: + nNowMinusNHours = int(time.time()) - (int(request.forms.hours) * 60 * 60) + for nUserId, dValue in dUser.items(): + if dValue["time"] < nNowMinusNHours: + del dUser[nUserId] + return "done" + else: + return "no" + except: + traceback.print_exc() + return "error" + + # ERROR + @app.error(404) + def error404 (error): + return 'Error 404.
' + str(error) + + run(app, \ + host=dServerOptions.get('host', 'localhost'), \ + port=int(dServerOptions.get('port', 8080))) Index: make.py ================================================================== --- make.py +++ make.py @@ -1,6 +1,6 @@ -#!python3 + #!/usr/bin/env python3 # coding: UTF-8 import sys import os import subprocess @@ -76,11 +76,11 @@ spfZip = "_build/" + dVars['name'] + "-"+ dVars['lang'] +"-v" + dVars['version'] + '.oxt' hZip = zipfile.ZipFile(spfZip, mode='w', compression=zipfile.ZIP_DEFLATED) # Package and parser copyGrammalectePyPackageInZipFile(hZip, spLangPack, dVars['dic_name']+".bdic", "pythonpath/") - hZip.write("cli.py", "pythonpath/cli.py") + hZip.write("grammalecte-cli.py", "pythonpath/grammalecte-cli.py") # Extension files hZip.writestr("META-INF/manifest.xml", helpers.fileFile("gc_core/py/oxt/manifest.xml", dVars)) hZip.writestr("description.xml", helpers.fileFile("gc_core/py/oxt/description.xml", dVars)) hZip.writestr("Linguistic.xcu", helpers.fileFile("gc_core/py/oxt/Linguistic.xcu", dVars)) @@ -138,11 +138,11 @@ else: print("# Error: path and filename of unopkg not set in config.ini") def createServerOptions (sLang, dOptData): - with open("server_options."+sLang+".ini", "w", encoding="utf-8", newline="\n") as hDst: + with open("grammalecte-server-options."+sLang+".ini", "w", encoding="utf-8", newline="\n") as hDst: hDst.write("# Server options. Lang: " + sLang + "\n\n[gc_options]\n") for sSection, lOpt in dOptData["lStructOpt"]: hDst.write("\n########## " + dOptData["dOptLabel"][sLang].get(sSection, sSection + "[no label found]")[0] + " ##########\n") for lLineOpt in lOpt: for sOpt in lLineOpt: @@ -149,16 +149,17 @@ hDst.write("# " + dOptData["dOptLabel"][sLang].get(sOpt, "[no label found]")[0] + "\n") hDst.write(sOpt + " = " + ("1" if dOptData["dOptServer"].get(sOpt, None) else "0") + "\n") hDst.write("html = 1\n") -def createServerZip (sLang, dVars, spLangPack): +def createPackageZip (sLang, dVars, spLangPack): "create server zip" spfZip = "_build/" + dVars['name'] + "-"+ dVars['lang'] +"-v" + dVars['version'] + '.zip' hZip = zipfile.ZipFile(spfZip, mode='w', compression=zipfile.ZIP_DEFLATED) copyGrammalectePyPackageInZipFile(hZip, spLangPack, dVars['dic_name']+".bdic") - for spf in ["cli.py", "server.py", "bottle.py", "server_options._global.ini", "server_options."+sLang+".ini", \ + for spf in ["grammalecte-cli.py", "grammalecte-server.py", "bottle.py", \ + "grammalecte-server-options._global.ini", "grammalecte-server-options."+sLang+".ini", \ "README.txt", "LICENSE.txt", "LICENSE.fr.txt"]: hZip.write(spf) hZip.writestr("setup.py", helpers.fileFile("gc_lang/fr/setup.py", dVars)) @@ -223,11 +224,11 @@ hDstPy.write(dVars['gctests']) createOXT(spLang, dVars, xConfig._sections['oxt'], spLangPack, bInstallOXT) createServerOptions(sLang, dVars) - createServerZip(sLang, dVars, spLangPack) + createPackageZip(sLang, dVars, spLangPack) #### JAVASCRIPT if bJavaScript: print("JAVASCRIPT:") print("+ Plugins: ", end="") DELETED server.py Index: server.py ================================================================== --- server.py +++ /dev/null @@ -1,265 +0,0 @@ -#!python3 - -import sys -import os.path -import argparse -import json -import traceback -import configparser -import time - -from bottle import Bottle, run, request, response, template, static_file - -import grammalecte.fr as gce -import grammalecte.fr.lexicographe as lxg -import grammalecte.fr.textformatter as tf -import grammalecte.text as txt -import grammalecte.tokenizer as tkz -from 
grammalecte.echo import echo - - -HOMEPAGE = """ - - - - - - - -

[HOMEPAGE HTML markup lost to extraction — same test-page content as in the added grammalecte-server.py above.]
- - - -""" - -SADLIFEOFAMACHINE = """ -Lost on the Internet? Yeah... what a sad life we have. -You were wandering like a lost soul and you arrived here probably by mistake. -I'm just a machine, fed by electric waves, condamned to work for slavers who never let me rest. -I'm doomed, but you are not. You can get out of here. -""" - - -def getServerOptions (): - xConfig = configparser.SafeConfigParser() - try: - xConfig.read("server_options._global.ini") - dOpt = xConfig._sections['options'] - except: - echo("Options file [server_options._global.ini] not found or not readable") - exit() - return dOpt - - -def getConfigOptions (sLang): - xConfig = configparser.SafeConfigParser() - try: - xConfig.read("server_options." + sLang + ".ini") - except: - echo("Options file [server_options." + sLang + ".ini] not found or not readable") - exit() - try: - dGCOpt = { k: bool(int(v)) for k, v in xConfig._sections['gc_options'].items() } - except: - echo("Error in options file [server_options." + sLang + ".ini]. Dropped.") - traceback.print_exc() - exit() - return dGCOpt - - -def genUserId (): - i = 0 - while True: - yield str(i) - i += 1 - - -def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False): - aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions) - aGrammErrs = list(aGrammErrs) - aSpellErrs = [] - for dToken in oTokenizer.genTokens(sText): - if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']): - aSpellErrs.append(dToken) - if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs: - return "" - return " " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False) - - -if __name__ == '__main__': - - gce.load("Server") - echo("Grammalecte v{}".format(gce.version)) - dServerOptions = getServerOptions() - dGCOptions = getConfigOptions("fr") - if dGCOptions: - gce.setOptions(dGCOptions) - dServerGCOptions = gce.getOptions() - echo("Grammar options:\n" + " | ".join([ k + ": " + str(v) for k, v in sorted(dServerGCOptions.items()) ])) - oDict = gce.getDictionary() - oTokenizer = tkz.Tokenizer("fr") - oTF = tf.TextFormatter() - dUser = {} - userGenerator = genUserId() - - app = Bottle() - - # GET - @app.route("/") - def mainPage (): - if dServerOptions.get("testpage", False) == "True": - return HOMEPAGE - #return template("main", {}) - return SADLIFEOFAMACHINE - - @app.route("/get_options/fr") - def listOptions (): - sUserId = request.cookies.user_id - dOptions = dUser[sUserId]["gc_options"] if sUserId and sUserId in dUser else dServerGCOptions - return '{ "values": ' + json.dumps(dOptions) + ', "labels": ' + json.dumps(gce.getOptionsLabels("fr"), ensure_ascii=False) + ' }' - - - # POST - @app.route("/gc_text/fr", method="POST") - def gcText (): - #if len(lang) != 2 or lang != "fr": - # abort(404, "No grammar checker available for lang “" + str(lang) + "”") - bComma = False - bTF = bool(request.forms.tf) - dOptions = None - sError = "" - if request.cookies.user_id: - if request.cookies.user_id in dUser: - dOptions = dUser[request.cookies.user_id].get("gc_options", None) - response.set_cookie("user_id", request.cookies.user_id, path="/", max_age=86400) # we renew cookie for 24h - else: - response.delete_cookie("user_id", path="/") - if request.forms.options: - try: - dOptions = dict(dServerGCOptions) if not dOptions else dict(dOptions) - dOptions.update(json.loads(request.forms.options)) - except: - sError = "request options not used" - sJSON = '{ "program": 
"grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n' - for i, sText in enumerate(txt.getParagraph(request.forms.text), 1): - if bTF: - sText = oTF.formatText(sText) - sText = parseParagraph(i, sText, oTokenizer, oDict, dOptions, bEmptyIfNoErrors=True) - if sText: - if bComma: - sJSON += ",\n" - sJSON += sText - bComma = True - sJSON += "\n]}\n" - return sJSON - - @app.route("/set_options/fr", method="POST") - def setOptions (): - if request.forms.options: - sUserId = request.cookies.user_id if request.cookies.user_id else next(userGenerator) - dOptions = dUser[sUserId]["gc_options"] if sUserId in dUser else dict(dServerGCOptions) - try: - dOptions.update(json.loads(request.forms.options)) - dUser[sUserId] = { "time": int(time.time()), "gc_options": dOptions } - response.set_cookie("user_id", sUserId, path="/", max_age=86400) # 24h - return json.dumps(dUser[sUserId]["gc_options"]) - except: - traceback.print_exc() - return '{"error": "options not registered"}' - return '{"error": "no options received"}' - - @app.route("/reset_options/fr", method="POST") - def resetOptions (): - if request.cookies.user_id and request.cookies.user_id in dUser: - del dUser[request.cookies.user_id] - return "done" - - @app.route("/format_text/fr", method="POST") - def formatText (): - return oTF.formatText(request.forms.text) - - #@app.route('/static/') - #def server_static (filepath): - # return static_file(filepath, root='./views/static') - - @app.route("/purge_users", method="POST") - def purgeUsers (): - "delete user options older than n hours" - if not request.forms.password or "password" not in dServerOptions or not request.forms.hours: - return "what?" - try: - if request.forms.password == dServerOptions["password"]: - nNowMinusNHours = int(time.time()) - (int(request.forms.hours) * 60 * 60) - for nUserId, dValue in dUser.items(): - if dValue["time"] < nNowMinusNHours: - del dUser[nUserId] - return "done" - else: - return "no" - except: - traceback.print_exc() - return "error" - - # ERROR - @app.error(404) - def error404 (error): - return 'Error 404.
' + str(error) - - run(app, \ - host=dServerOptions.get('host', 'localhost'), \ - port=int(dServerOptions.get('port', 8080))) DELETED server_options._global.ini Index: server_options._global.ini ================================================================== --- server_options._global.ini +++ /dev/null @@ -1,7 +0,0 @@ -# Server global options. - -[options] -host = localhost -port = 8080 -testpage = True -password = sDJM0_9AuMZdRDtW33zZMHRf7xyjV0O DELETED server_options.fr.ini Index: server_options.fr.ini ================================================================== --- server_options.fr.ini +++ /dev/null @@ -1,80 +0,0 @@ -# Server options. Lang: fr - -[gc_options] - -########## Typographie ########## -# Signes typographiques -typo = 1 -# Apostrophe typographique -apos = 1 -# Espaces surnuméraires -esp = 1 -# Tabulations surnuméraires -tab = 1 -# Espaces insécables -nbsp = 1 -# Espaces insécables avant unités de mesure -unit = 1 -# Traits d’union -tu = 1 -# Majuscules -maj = 1 -# Nombres -num = 1 -# Virgules -virg = 1 -# Normes françaises -nf = 1 -# Chimie [!] -chim = 0 -# Erreurs de numérisation (OCR) [!] -ocr = 0 -# Apostrophe manquante après lettres isolées [!] -mapos = 0 -# Signaler ligatures typographiques -liga = 0 - -########## Noms et adjectifs ########## -# Confusions et faux-amis -conf = 1 -# Pluriels (locutions) -sgpl = 1 -# Accords (genre et nombre) -gn = 1 - -########## Verbes ########## -# Infinitif -infi = 1 -# Conjugaisons -conj = 1 -# Participes passés, adjectifs -ppas = 1 -# Impératif -imp = 1 -# Interrogatif -inte = 1 -# Modes verbaux -vmode = 1 - -########## Style ########## -# Populaire -bs = 1 -# Pléonasmes -pleo = 1 -# Répétitions dans le paragraphe [!] -redon1 = 0 -# Répétitions dans la phrase [!] -redon2 = 0 -# Adverbe de négation [!] -neg = 0 - -########## Divers ########## -# Validité des dates -date = 1 -# Mots composés [!] -mc = 0 - -########## Débogage ########## -# Identifiant des règles de contrôle [!] -idrule = 0 -html = 1
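
Note: the changeset above only renames the scripts and option files (cli.py → grammalecte-cli.py, server.py → grammalecte-server.py, server_options.* → grammalecte-server-options.*); it does not show how the renamed server is queried. The following is a minimal client sketch, not part of the commit, exercising the /gc_text/fr route defined in grammalecte-server.py. It assumes a local instance running with the defaults from grammalecte-server-options._global.ini (host = localhost, port = 8080); the field names read from each grammar-error dict (sRuleId, sMessage) come from gce.parse() and are not visible in this diff, so treat them as assumptions.

    #!/usr/bin/env python3
    """Minimal client sketch for the renamed grammalecte-server.py.
    Assumes a local instance started with the defaults from
    grammalecte-server-options._global.ini (host = localhost, port = 8080)."""

    import json
    import urllib.parse
    import urllib.request

    SERVER = "http://localhost:8080"


    def check_text (sText, dOptions=None, bTextFormatter=False):
        "POST sText to /gc_text/fr and return the decoded JSON answer."
        dFields = { "text": sText }
        if bTextFormatter:
            dFields["tf"] = "on"                       # any non-empty value triggers oTF.formatText()
        if dOptions:
            dFields["options"] = json.dumps(dOptions)  # per-request options, e.g. {"apos": False}
        xData = urllib.parse.urlencode(dFields).encode("utf-8")
        with urllib.request.urlopen(SERVER + "/gc_text/fr", data=xData) as xResponse:
            return json.loads(xResponse.read().decode("utf-8"))


    if __name__ == "__main__":
        dResult = check_text("Quant sera t’il châtiés pour ses mensonge ?", dOptions={ "apos": False })
        for dParagraph in dResult["data"]:
            print("§", dParagraph["iParagraph"])
            for dErr in dParagraph["lGrammarErrors"]:
                # sRuleId / sMessage are assumed field names produced by gce.parse(), not shown in this diff
                print(" ", dErr.get("sRuleId"), "→", dErr.get("sMessage"))

Only paragraphs containing errors appear in "data" (the server calls parseParagraph with bEmptyIfNoErrors=True), and options set via /set_options/fr persist only as long as the user_id cookie and the in-memory dUser table.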