Overview
Comment:     [build] graph builder: small update
Downloads:   Tarball | ZIP archive | SQL archive
Timelines:   family | ancestors | descendants | both | build | rg
Files:       files | file ages | folders
SHA3-256:    ab72f24c4f7474b636633c9027fe7e11
User & Date: olr on 2018-08-17 21:43:58
Other Links: branch diff | manifest | tags
Context
2018-08-18
10:39  [fr] conversion: regex rules -> graph rules
       check-in: 16d8b47671 user: olr tags: fr, rg

2018-08-17
21:43  [build] graph builder: small update
       check-in: ab72f24c4f user: olr tags: build, rg
21:43  [fr] conversion: regex rules -> graph rules
       check-in: 95247d8211 user: olr tags: fr, rg
Changes
Modified compile_rules_graph.py from [482c80fae9] to [899bcecea3].
︙

    return sFuncName  if not bStartWithEqual  else "="+sFuncName


def storeAction (sActionId, aAction):
    "store <aAction> in <dACTIONS> avoiding duplicates"
    nVar = 0
    while True:
        sActionName = sActionId + "_" + str(nVar)
        if sActionName not in dACTIONS:
            dACTIONS[sActionName] = aAction
            return sActionName
        elif aAction == dACTIONS[sActionName]:
            return sActionName
        nVar += 1
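The loop above deduplicates actions by name suffix: it appends an increasing numeric suffix to the action id until it finds either a free slot or a slot that already holds an identical action, whose name it then reuses. A minimal runnable sketch of the same scheme; dACTIONS is the module-level store from compile_rules_graph.py, but the sample action tuples below are illustrative placeholders, not taken from the repository:

    dACTIONS = {}   # module-level store, as in compile_rules_graph.py

    def storeAction (sActionId, aAction):
        "store <aAction> in <dACTIONS> avoiding duplicates"
        nVar = 0
        while True:
            sActionName = sActionId + "_" + str(nVar)
            if sActionName not in dACTIONS:
                dACTIONS[sActionName] = aAction    # free slot: register the action
                return sActionName
            elif aAction == dACTIONS[sActionName]:
                return sActionName                 # identical action already stored: reuse its name
            nVar += 1                              # name taken by a different action: try next suffix

    # hypothetical action tuples, only to exercise the dedup logic
    print(storeAction("rule1", ("-1", ">>", "error")))   # rule1_0
    print(storeAction("rule1", ("-1", ">>", "error")))   # rule1_0  (duplicate, name reused)
    print(storeAction("rule1", ("-2", ">>", "warn")))    # rule1_1  (different action, next suffix)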
︙
    sCode = re.sub(r"(morph|analyse|value)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', sCode)        # next token
    sCode = re.sub(r"(morph|analyse|value)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', sCode)    # previous token
    sCode = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', sCode)
    sCode = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', sCode)          # before(sCode)
    sCode = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode)                 # after(sCode)
    sCode = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', sCode)        # before0(sCode)
    sCode = re.sub(r"\bafter0\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode)                # after0(sCode)
    sCode = re.sub(r"analyseWord[(]", 'analyse(', sCode)                                                    # (line added in this check-in)
    sCode = re.sub(r"[\\](\d+)", 'lToken[nTokenOffset+\\1]["sValue"]', sCode)
    sCode = re.sub(r"[\\]-(\d+)", 'lToken[nLastToken-\\1+1]["sValue"]', sCode)
    return sCode


def genTokenLines (sTokenLine, dDef):
    "tokenize a string and return a list of lines of tokens"
︙
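This second hunk adds one rewrite to the DSL-to-Python conversion: legacy analyseWord(...) calls in rule conditions are now mapped to analyse(...). A small sketch applying two of these substitutions in isolation; the regexes are the ones from the hunk, but the sample condition string is made up for illustration:

    import re

    def demoRewrite (sCode):
        "rewrite a (hypothetical) rule condition into Python-ish code"
        sCode = re.sub(r"analyseWord[(]", 'analyse(', sCode)                       # the rewrite added in this check-in
        sCode = re.sub(r"[\\](\d+)", 'lToken[nTokenOffset+\\1]["sValue"]', sCode)  # \n -> token value lookup
        return sCode

    print(demoRewrite(r'analyseWord(\2, ":V")'))
    # -> analyse(lToken[nTokenOffset+2]["sValue"], ":V")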