Overview
Comment:     [build][core][fr] fix selection from the pattern end
SHA3-256:    3e1100a2fdc57338514cdcd3d7345990
User & Date: olr on 2018-07-31 19:19:40
Context
2018-07-31

19:32  [build] functions rewriting update
       check-in: cbcedcb422 user: olr tags: build, rg
19:19  [build][core][fr] fix selection from the pattern end
       check-in: 3e1100a2fd user: olr tags: fr, core, build, rg
18:49  [core][build] selected groups rework (allow selection from the pattern end)
       check-in: 9cb05aa144 user: olr tags: core, build, rg
Changes
Modified compile_rules_graph.py from [a2cfd69492] to [f1b39b4a98].
︙
Lines 16-43 → 16-46 (three lines added):

def prepareFunction (s):
    "convert simple rule syntax to a string of Python code"
    s = s.replace("__also__", "bCondMemo")
    s = s.replace("__else__", "not bCondMemo")
    s = s.replace("sContext", "_sAppContext")
    s = re.sub(r"(morph|morphVC|analyse|value|displayInfo)[(]\\(\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
    s = re.sub(r"(morph|morphVC|analyse|value|displayInfo)[(]\\-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', s)
    s = re.sub(r"(select|exclude|define|define_from)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset]', s)
    s = re.sub(r"(select|exclude|define|define_from)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', s)
    s = re.sub(r"(tag_before|tag_after)[(][\\](\d+)", 'g_\\1(lToken[\\2+nTokenOffset], dTags', s)
    s = re.sub(r"space_after[(][\\](\d+)", 'g_space_between_tokens(lToken[\\1+nTokenOffset], lToken[\\1+nTokenOffset+1]', s)
    s = re.sub(r"analyse_with_next[(][\\](\d+)", 'g_merged_analyse(lToken[\\1+nTokenOffset], lToken[\\1+nTokenOffset+1]', s)
    s = re.sub(r"(morph|analyse|value)\(>1", 'g_\\1(lToken[nLastToken+1]', s)                     # next token
    s = re.sub(r"(morph|analyse|value)\(<1", 'g_\\1(lToken[nTokenOffset]', s)                     # previous token
    s = re.sub(r"(morph|analyse|value)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', s)      # next token
    s = re.sub(r"(morph|analyse|value)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', s)  # previous token
    s = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', s)
    s = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', s)        # before(s)
    s = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)               # after(s)
    s = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', s)      # before0(s)
    s = re.sub(r"\bafter0\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', s)              # after0(s)
    s = re.sub(r"[\\](\d+)", 'lToken[\\1+nTokenOffset]["sValue"]', s)
    s = re.sub(r"[\\]-(\d+)", 'lToken[nLastToken-\\1+1]["sValue"]', s)
    return s


def genTokenLines (sTokenLine, dDef):
    "tokenize a string and return a list of lines of tokens"
    lToken = sTokenLine.split()
    lTokenLines = None
︙
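The `\-N` substitutions are presumably the three lines added in this hunk: a reference written `\-N` in a rule is rewritten to index `lToken[nLastToken-N+1]`, counting back from the last matched token instead of forward from `nTokenOffset`, which is what "selection from the pattern end" means here. A minimal sketch of one such rewrite in isolation (the sample condition string is invented):

    import re

    # Invented sample: apply the end-of-pattern rewrite shown above.
    # A \-N reference becomes an index counted back from nLastToken.
    sCode = 'value(\\-1, ":V")'
    sCode = re.sub(r"(morph|morphVC|analyse|value|displayInfo)[(]\\-(\d+)",
                   'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    print(sCode)  # g_value(lToken[nLastToken-1+1], ":V")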
Lines 156-162 → 159-173:

    m = re.match("/(\\w+)/", sAction)
    if m:
        sOption = m.group(1)
        sAction = sAction[m.end():].strip()
    if nPriority == -1:
        nPriority = dOptPriority.get(sOption, 4)
    # valid action?
︙
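For readers outside the build code: this block strips an optional `/option/` prefix from an action string and uses it to pick a default priority. A self-contained sketch with an invented priority table and action:

    import re

    # Invented table and action string; same parsing logic as the hunk above.
    dOptPriority = { "typo": 6 }
    sAction = "/typo/ ->> décelé"
    sOption = False
    nPriority = -1
    m = re.match("/(\\w+)/", sAction)
    if m:
        sOption = m.group(1)                 # option governing the rule
        sAction = sAction[m.end():].strip()  # remainder is the real action
    if nPriority == -1:
        nPriority = dOptPriority.get(sOption, 4)
    print(sOption, nPriority, sAction)  # typo 6 ->> décelé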
Lines 378-384 → 381-395:

    #sJSCallables = "// generated code, do not edit\nconst oEvalFunc = {\n"
    for sFuncName, sReturn in dFUNCTIONS.items():
        if sFuncName.startswith("_g_c_"): # condition
            sParams = "lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, dTags, sSentence, sSentence0"
        elif sFuncName.startswith("g_m_"): # message
            sParams = "lToken, nTokenOffset"
        elif sFuncName.startswith("_g_s_"): # suggestion
︙
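The loop above chooses each generated callable's signature from its name prefix (`_g_c_` condition, `g_m_` message, `_g_s_` suggestion, …). A reduced sketch of that code-generation step, with a dummy function table whose name and body are invented:

    # Dummy function table; emit one Python def per compiled helper,
    # choosing the signature from the name prefix as in the loop above.
    dFUNCTIONS = { "_g_c_42": 'lToken[nLastToken]["sValue"] != ""' }
    sPyCallables = ""
    for sFuncName, sReturn in dFUNCTIONS.items():
        if sFuncName.startswith("_g_c_"):   # condition
            sParams = "lToken, nTokenOffset, nLastToken, sCountry, bCondMemo, dTags, sSentence, sSentence0"
        else:                               # message, suggestion, …
            sParams = "lToken, nTokenOffset"
        sPyCallables += "def {} ({}):\n    return {}\n".format(sFuncName, sParams, sReturn)
    print(sPyCallables)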
Modified gc_core/py/lang_core/gc_engine.py from [58acfdcc67] to [c3ab96bbbf].
︙
Lines 787-793 → 787-801:

        iTokenStart, iTokenEnd, cStartLimit, cEndLimit, nPriority, sMessage, sURL = eAct
        nTokenErrorStart = nTokenOffset + iTokenStart if iTokenStart > 0 else nLastToken + iTokenStart
        if "bImmune" not in self.lToken[nTokenErrorStart]:
            nTokenErrorEnd = nTokenOffset + iTokenEnd if iTokenEnd > 0 else nLastToken + iTokenEnd
            nErrorStart = self.nOffsetWithinParagraph + (self.lToken[nTokenErrorStart]["nStart"] if cStartLimit == "<" else self.lToken[nTokenErrorStart]["nEnd"])
            nErrorEnd = self.nOffsetWithinParagraph + (self.lToken[nTokenErrorEnd]["nEnd"] if cEndLimit == ">" else self.lToken[nTokenErrorEnd]["nStart"])
            if nErrorStart not in self.dError or nPriority > dPriority.get(nErrorStart, -1):
︙
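The first two lines of this hunk carry the fix on the engine side: a positive token index is resolved forward from `nTokenOffset`, while a negative one is resolved backward from `nLastToken`. Illustrated with hypothetical offsets:

    # Hypothetical offsets for one matched pattern.
    nTokenOffset = 4   # token index just before the pattern
    nLastToken = 9     # index of the last token of the pattern
    for iTokenStart in (1, 2, -1, -2):
        nTokenErrorStart = nTokenOffset + iTokenStart if iTokenStart > 0 else nLastToken + iTokenStart
        print(iTokenStart, "->", nTokenErrorStart)  # 1 -> 5, 2 -> 6, -1 -> 8, -2 -> 7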
Lines 837-843 → 837-854:

                        if bDebug:
                            print("  COND_BREAK")
                        break
            except Exception as e:
                raise Exception(str(e), sLineId, sRuleId, self.sSentence)
        return bChange
︙
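The reworked except clause keeps failures traceable: the original error is re-raised together with the line id, the rule id, and the sentence being parsed. A toy reproduction with invented identifiers (the rule name is borrowed from the rules.grx hunk below):

    # Invented identifiers; same wrapping as the except clause above.
    def checkRule (sLineId, sRuleId, sSentence):
        try:
            raise ValueError("unknown token reference")
        except Exception as e:
            raise Exception(str(e), sLineId, sRuleId, sSentence)

    try:
        checkRule("#11074", "erreur_déceler_desseller_desceller", "Il a décelé le problème.")
    except Exception as e:
        print(e.args)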
Modified gc_lang/fr/rules.grx from [99a2dff026] to [10b0cde65f].
︙
Lines 11070-11076 → 11070-11085:

TEST: {{dés}} que nous sommes partis, il a piqué une crise
TEST: {{des}} {{les}} premiers symptômes, appelez-moi


# desceller / déceler / desseller
__erreur_déceler_desseller_desceller__
    [>erreur|>faute|>incohérence|>problème|>bug|>bogue|>faille|>maladie|>défaut|>défaillance|>perturbation|>irrégularité] <> [>desseller|>desceller]
︙