Index: lex_build.py
==================================================================
--- lex_build.py
+++ lex_build.py
@@ -7,13 +7,13 @@
 
 import graphspell.dawg as fsa
 from graphspell.ibdawg import IBDAWG
 
 
-def build (spfSrc, sLangCode, sLangName, sfDict, bJSON=False, sDicName="", sFilter="", cStemmingMethod="S", nCompressMethod=1):
+def build (spfSrc, sLangCode, sLangName, sfDict, bJSON=False, sDicName="", sDescription="", sFilter="", cStemmingMethod="S", nCompressMethod=1):
     "transform a text lexicon as a binary indexable dictionary"
-    oDAWG = fsa.DAWG(spfSrc, cStemmingMethod, sLangCode, sLangName, sDicName, sFilter)
+    oDAWG = fsa.DAWG(spfSrc, cStemmingMethod, sLangCode, sLangName, sDicName, sDescription, sFilter)
     dir_util.mkpath("graphspell/_dictionaries")
     oDAWG.writeInfo("graphspell/_dictionaries/" + sfDict + ".info.txt")
     oDAWG.writeBinary("graphspell/_dictionaries/" + sfDict + ".bdic", int(nCompressMethod))
     if bJSON:
         dir_util.mkpath("graphspell-js/_dictionaries")
@@ -29,10 +29,10 @@
     xParser.add_argument("dic_filename", type=str, help="dictionary file name (without extension)")
     xParser.add_argument("-js", "--json", help="Build dictionary in JSON", action="store_true")
     xParser.add_argument("-s", "--stemming", help="stemming method: S=suffixes, A=affixes, N=no stemming", type=str, choices=["S", "A", "N"], default="S")
     xParser.add_argument("-c", "--compress", help="compression method: 1, 2 (beta), 3, (beta)", type=int, choices=[1, 2, 3], default=1)
     xArgs = xParser.parse_args()
-    build(xArgs.src_lexicon, xArgs.lang_code, xArgs.lang_name, xArgs.dic_filename, xArgs.json)
-
+    build(xArgs.src_lexicon, xArgs.lang_code, xArgs.lang_name, xArgs.dic_filename, "", xArgs.json)
+
 
 if __name__ == '__main__':
     main()

Index: make.py
==================================================================
--- make.py
+++ make.py
@@ -350,11 +350,11 @@
         spfLexSrc = dVars['lexicon_src']
         lSfDictDst = dVars['dic_filenames'].split(",")
         lDicName = dVars['dic_name'].split(",")
         lFilter = dVars['dic_filter'].split(",")
         for sfDictDst, sDicName, sFilter in zip(lSfDictDst, lDicName, lFilter):
-            lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, sFilter, dVars['stemming_method'], int(dVars['fsa_method']))
+            lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, "", sFilter, dVars['stemming_method'], int(dVars['fsa_method']))
     else:
         if sType == "extended":
             spfLexSrc = dVars['lexicon_extended_src']
             sfDictDst = dVars['dic_extended_filename']
             sDicName = dVars['dic_extended_name']
@@ -364,11 +364,11 @@
             sDicName = dVars['dic_community_name']
         elif sType == "personal":
             spfLexSrc = dVars['lexicon_personal_src']
             sfDictDst = dVars['dic_personal_filename']
             sDicName = dVars['dic_personal_name']
-        lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, "", dVars['stemming_method'], int(dVars['fsa_method']))
+        lex_build.build(spfLexSrc, dVars['lang'], dVars['lang_name'], sfDictDst, bJavaScript, sDicName, "", "", dVars['stemming_method'], int(dVars['fsa_method']))
 
 
 def main ():
     "build Grammalecte with requested options"
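
What the patch changes, in short: lex_build.build() and fsa.DAWG() gain an sDescription parameter between sDicName and sFilter, and the call sites in make.py are updated to pass an empty description for now. Below is a minimal sketch of how a caller might use the new parameter once it carries real text, assuming the script is run from the repository root; the lexicon path and the "fr"/"French" literals are example values, not taken from the patch.

    import lex_build

    # Build a binary dictionary from a text lexicon using the updated signature.
    # sDescription is the parameter added by this patch; every literal value here
    # is an illustrative placeholder, not a value used by Grammalecte itself.
    lex_build.build(
        "lexicons/French.lex",                          # spfSrc: source lexicon (example path)
        "fr",                                           # sLangCode
        "French",                                       # sLangName
        "fr-classic",                                   # sfDict: output file name, without extension
        bJSON=False,                                    # set True to also write a JSON dictionary for graphspell-js
        sDicName="Classic",                             # dictionary name (example)
        sDescription="Classic dictionary for French",   # new parameter
        sFilter="",                                     # no filtering of the lexicon
        cStemmingMethod="S",                            # S=suffixes, A=affixes, N=no stemming
        nCompressMethod=1
    )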