Overview
Comment: [build] fix rules generation when first token was multiple and a selected group
SHA3-256: 9d39408049f4ddf774c119390c6817e5
User & Date: olr on 2018-07-15 11:33:00
Context
2018-07-16
13:37 | [build] changeReferenceToken before prepareFunction | check-in: 7d80af29a6 | user: olr | tags: build, rg

2018-07-15
11:33 | [build] fix rules generation when first token was multiple and a selected group | check-in: 9d39408049 | user: olr | tags: build, rg
09:43 | [build] prepareFunctions: use token value by default | check-in: 450e1ec422 | user: olr | tags: build, rg
Changes
Modified compile_rules_graph.py from [58681b66b6] to [60e97fc2e5].
︙
The condition/action rewriting code (three old lines removed; the new version reads):

```python
s = s.replace("__also__", "bCondMemo")
s = s.replace("__else__", "not bCondMemo")
s = re.sub(r"(morph|analyse|value|displayInfo)[(]\\(\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
s = re.sub(r"(select|exclude|define|define_from)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
s = re.sub(r"(tag_before|tag_after)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset], dTags', s)
s = re.sub(r"space_after[(][\\](\d+)", 'g_space_between_tokens(lToken[\\1+nTokenOffset], lToken[\\1+nTokenOffset+1]', s)
s = re.sub(r"analyse_with_next[(][\\](\d+)", 'g_merged_analyse(lToken[\\1+nTokenOffset], lToken[\\1+nTokenOffset+1]', s)
s = re.sub(r"(morph|analyse|value)\(>1", 'g_\\1(lToken[nLastToken+1]', s)                   # next token
s = re.sub(r"(morph|analyse|value)\(<1", 'g_\\1(lToken[nTokenOffset]', s)                   # previous token
s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
s = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', s)      # before(s)
s = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)             # after(s)
s = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', s)    # before0(s)
s = re.sub(r"\bafter0\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)            # after0(s)
s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)
return s
```
︙
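For context, a minimal sketch (not part of this check-in; the sample condition string is invented) of how these substitutions rewrite the rule DSL into Python code:

```python
import re

# Invented sample condition in the rule DSL: test the morphology of token 2
# and the spelling of token 1.
s = r"morph(\2, ':V') and spell(\1)"
# Three of the substitutions from the hunk above, applied in order:
s = re.sub(r"(morph|analyse|value|displayInfo)[(]\\(\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
# The final catch-all turns the remaining \n references into token values:
s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)
print(s)
# g_morph(lToken[2+nTokenOffset], ':V') and _oSpellChecker.isValid(lToken[1+nTokenOffset]["sValue"])
```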
Expansion of multiple tokens (one line changed: the branch taken when the multiple token is the first token of the rule):

```python
if ( (sToken.startswith("[") and sToken.endswith("]")) or (sToken.startswith("([") and sToken.endswith("])")) ):
    # multiple token
    bSelectedGroup = sToken.startswith("(") and sToken.endswith(")")
    if bSelectedGroup:
        sToken = sToken[1:-1]
    lNewToken = sToken[1:-1].split("|")
    if not lTokenLines:
        lTokenLines = [ ["("+s+")"]  for s in lNewToken ]  if bSelectedGroup  else [ [s]  for s in lNewToken ]
        if bNullPossible:
            lTokenLines.extend([ []  for i in range(len(lNewToken)+1) ])
    else:
        lNewTemp = []
        if bNullPossible:
            for aRule in lTokenLines:
                for sElem in lNewToken:
```
︙
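The fix itself, as a standalone sketch (hypothetical helper name; behavior copied from the changed line above): when the whole multiple token is a selected group, each alternative keeps its parentheses, so the group is still found when positions are computed later.

```python
def expand_first_token(sToken):
    """Hypothetical standalone version of the fixed branch for the first
    token of a rule: expand a multiple token [a|b] or ([a|b]) into token
    lines, keeping parentheses around the alternatives of a selected group."""
    bSelectedGroup = sToken.startswith("(") and sToken.endswith(")")
    if bSelectedGroup:
        sToken = sToken[1:-1]                   # strip the group parentheses
    lNewToken = sToken[1:-1].split("|")         # strip [ ] and split alternatives
    return [ ["("+s+")"] for s in lNewToken ] if bSelectedGroup else [ [s] for s in lNewToken ]

print(expand_first_token("([a|b])"))    # [['(a)'], ['(b)']]  (before the fix: [['a'], ['b']])
print(expand_first_token("[a|b]"))      # [['a'], ['b']]
```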
createRule (two lines added: a temporary debugging print for the rule at source line 2211):

```python
def createRule (iLine, sRuleName, sTokenLine, iActionBlock, sActions, nPriority, dOptPriority, dDef):
    "generator: create rule as list"
    # print(iLine, "//", sRuleName, "//", sTokenLine, "//", sActions, "//", nPriority)
    for lToken in genTokenLines(sTokenLine, dDef):
        # Calculate positions
        dPos = {}   # key: iGroup, value: iToken
        iGroup = 0
        if iLine == 2211:
            print(lToken)
        for i, sToken in enumerate(lToken):
            if sToken.startswith("(") and sToken.endswith(")"):
                lToken[i] = sToken[1:-1]
                iGroup += 1
                dPos[iGroup] = i + 1    # we add 1, for we count tokens from 1 to n (not from 0)
        # Parse actions
```
︙
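To see why the kept parentheses matter, here is the position computation above run on an invented token list; without the fix, a selected group in the first token would never be entered into dPos:

```python
# Invented example: two selected groups among three tokens.
lToken = ["(le)", "chat", "(noir)"]
dPos = {}       # key: iGroup, value: iToken
iGroup = 0
for i, sToken in enumerate(lToken):
    if sToken.startswith("(") and sToken.endswith(")"):
        lToken[i] = sToken[1:-1]
        iGroup += 1
        dPos[iGroup] = i + 1    # tokens are counted from 1 to n
print(dPos)     # {1: 1, 2: 3}
print(lToken)   # ['le', 'chat', 'noir']
```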
Rule-file parsing (one line changed in the recognition of action lines):

```python
            sRuleName = m.group(1)
            iActionBlock = 1
            nPriority = int(m.group(2)[1:]) if m.group(2) else -1
        else:
            print("Error at rule group: ", sLine, " -- line:", i)
            break
    elif re.search("^ +<<- ", sLine) or sLine.startswith(" ") \
            or re.search("^ +#", sLine) or re.search(r"^ [-~=>/](?:\d\.?(?::\.?\d+|)|)>> ", sLine) :
        # actions
        sActions += " " + sLine.strip()
    elif re.match("[ ]*$", sLine):
        # empty line to end merging
        if not lTokenLine:
            continue
        if not sActions:
```
︙
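A hedged check of the action-line pattern from the hunk above (the whitespace inside the real pattern may have been collapsed by this text rendering; the sample lines are invented):

```python
import re

zAction = re.compile(r"^ [-~=>/](?:\d\.?(?::\.?\d+|)|)>> ")
for sLine in (" ->> suggestion", " ~2>> text processing", " -2:3>> rewrite", " =>> disambiguation", " no action"):
    print(bool(zAction.search(sLine)), repr(sLine))
# True  ' ->> suggestion'
# True  ' ~2>> text processing'
# True  ' -2:3>> rewrite'
# True  ' =>> disambiguation'
# False ' no action'
```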
Graph creation and debugging (two lines added, several changed; the debug dumps are disabled behind `if False:` guards and filter on rule "##2211"):

```python
for i, sRuleGroup, sTokenLine, iActionBlock, sActions, nPriority in lRuleLine:
    for lRule in createRule(i, sRuleGroup, sTokenLine, iActionBlock, sActions, nPriority, dOptPriority, dDef):
        lPreparedRule.append(lRule)
# Graph creation
oDARG = darg.DARG(lPreparedRule, sLang)
dAllGraph[sGraphName] = oDARG.createGraph()
# Debugging
if False:
    print("\nRULES:")
    for e in lPreparedRule:
        if e[-2] == "##2211":
            print(e)
if False:
    print("\nGRAPH:", sGraphName)
    for k, v in dAllGraph[sGraphName].items():
        print(k, "\t", v)

# creating file with all functions callable by rules
print("  creating callables...")
sPyCallables = "# generated code, do not edit\n"
#sJSCallables = "// generated code, do not edit\nconst oEvalFunc = {\n"
for sFuncName, sReturn in dFUNCTIONS.items():
    if sFuncName.startswith("_g_c_"): # condition
```
︙