Index: compile_rules_graph.py
==================================================================
--- compile_rules_graph.py
+++ compile_rules_graph.py
@@ -12,35 +12,30 @@
 
 dACTIONS = {}
 dFUNCTIONS = {}
 
 
-def prepareFunction (s, bTokenValue=False):
+def prepareFunction (s):
     "convert simple rule syntax to a string of Python code"
     s = s.replace("__also__", "bCondMemo")
     s = s.replace("__else__", "not bCondMemo")
     s = re.sub(r"(morph|analyse|value|displayInfo)[(]\\(\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
     s = re.sub(r"(select|exclude|define|define_from)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
     s = re.sub(r"(tag_before|tag_after)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset], dTags', s)
     s = re.sub(r"space_after[(][\\](\d+)", 'g_space_between_tokens(lToken[\\1+nTokenOffset], lToken[\\1+nTokenOffset+1]', s)
     s = re.sub(r"analyse_with_next[(][\\](\d+)", 'g_merged_analyse(lToken[\\1+nTokenOffset], lToken[\\1+nTokenOffset+1]', s)
-    s = re.sub(r"(switchGender|has(?:(?:Mas|Fem)Form)|Simil)[(]\\(\d+)", '\\1(lToken[\\2+nTokenOffset]["sValue"]', s)
+    #s = re.sub(r"(switchGender|has(?:(?:Mas|Fem)Form)|Simil)[(]\\(\d+)", '\\1(lToken[\\2+nTokenOffset]["sValue"]', s)
     s = re.sub(r"(morph|analyse|value)\(>1", 'g_\\1(lToken[nLastToken+1]', s)                       # next token
     s = re.sub(r"(morph|analyse|value)\(<1", 'g_\\1(lToken[nTokenOffset]', s)                       # previous token
-    s = re.sub(r"[\\](\d+)\.is(upper|lower|title)\(\)", 'lToken[\\1+nTokenOffset]["sValue"].is\\2()', s)
-    s = re.sub(r"[\\](\d+)\.(startswith|endswith)\(", 'lToken[\\1+nTokenOffset]["sValue"].\\2(', s)
+    #s = re.sub(r"[\\](\d+)\.is(upper|lower|title)\(\)", 'lToken[\\1+nTokenOffset]["sValue"].is\\2()', s)
+    #s = re.sub(r"[\\](\d+)\.(startswith|endswith)\(", 'lToken[\\1+nTokenOffset]["sValue"].\\2(', s)
     s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
     s = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', s)          # before(s)
     s = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)                 # after(s)
     s = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', s)        # before0(s)
     s = re.sub(r"\bafter0\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)                # after0(s)
-    if bTokenValue:
-        # token values are used as parameter
-        s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)
-    else:
-        # tokens used as parameter
-        s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]', s)
+    s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)
     return s
 
 
 def genTokenLines (sTokenLine, dDef):
     "tokenize a string and return a list of lines of tokens"
@@ -217,11 +212,11 @@
         if mURL:
             sURL = mURL.group(1).strip()
             sMsg = sMsg[:mURL.start(0)].strip()
         checkTokenNumbers(sMsg, sActionId, nToken)
         if sMsg[0:1] == "=":
-            sMsg = prepareFunction(sMsg[1:], True)
+            sMsg = prepareFunction(sMsg[1:])
             dFUNCTIONS["g_m_"+sActionId] = sMsg
             sMsg = "=g_m_"+sActionId
         else:
             checkIfThereIsCode(sMsg, sActionId)
 
@@ -239,11 +234,11 @@
 
     checkIfThereIsCode(sAction, sActionId)
     if cAction == "-":
         ## error detected --> suggestion
         if sAction[0:1] == "=":
-            sAction = prepareFunction(sAction, True)
+            sAction = prepareFunction(sAction)
             dFUNCTIONS["_g_s_"+sActionId] = sAction[1:]
             sAction = "=_g_s_"+sActionId
         elif sAction.startswith('"') and sAction.endswith('"'):
             sAction = sAction[1:-1]
         if not sMsg:
@@ -250,11 +245,11 @@
             print("# Error in action at line " + sActionId + ": The message is empty.")
         return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction, nPriority, sMsg, sURL]
     elif cAction == "~":
         ## text processor
         if sAction[0:1] == "=":
-            sAction = prepareFunction(sAction, True)
+            sAction = prepareFunction(sAction)
             dFUNCTIONS["_g_p_"+sActionId] = sAction[1:]
             sAction = "=_g_p_"+sActionId
         elif sAction.startswith('"') and sAction.endswith('"'):
             sAction = sAction[1:-1]
     return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction]