Overview
| Comment: | [build][core] function info for tokens |
|---|---|
| SHA3-256: | d82e5dd0218f71f1d7d4967ed8c04507 |
| User & Date: | olr on 2021-01-05 15:53:17 |
Context
2021-01-05
| 16:39 | [core][fr] suggMasPlur: allow self suggestion | check-in: 6544c2d607 | user: olr | tags: trunk, fr, core |
| 15:53 | [build][core] function info for tokens | check-in: d82e5dd021 | user: olr | tags: trunk, core, build |

2021-01-04
| 19:28 | [fx][tb] editor: fix wrong variable name | check-in: 03ae204c73 | user: olr | tags: trunk, tb, fx |
Changes
Modified compile_rules_graph.py from [d4d961a0bc] to [0195f7a15c].
︙
sCode = re.sub(r"\b(select|define|definefrom|rewrite|addmorph|setmeta)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
sCode = re.sub(r"\b(tagbefore|tagafter)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2], dTags', sCode)
sCode = re.sub(r"\b(tagbefore|tagafter)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1], dTags', sCode)
sCode = re.sub(r"\bspace[(][\\](\d+)", 'g_space(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
sCode = re.sub(r"\bspace[(][\\]-(\d+)", 'g_space(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
sCode = re.sub(r"\bmorph2[(][\\](\d+)", 'g_morph2(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
sCode = re.sub(r"\bmorph2[(][\\]-(\d+)", 'g_morph2(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(>1", 'g_\\1(lToken[nLastToken+1]', sCode) # next token
sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(<1", 'g_\\1(lToken[nTokenOffset]', sCode) # previous token
sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', sCode) # next token
sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', sCode) # previous token
sCode = re.sub(r"\bspace[(](>1)", 'g_space(lToken[nLastToken+1], g_token(lToken, nLastToken+2)', sCode) # next token
sCode = re.sub(r"\bspace[(](<1)", 'g_space(lToken[nTokenOffset], lToken[nTokenOffset+1]', sCode) # previous token
sCode = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', sCode)
sCode = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', sCode) # before(sCode)
sCode = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode) # after(sCode)
sCode = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', sCode) # before0(sCode)
sCode = re.sub(r"\bafter0\(\s*", 'look(sSentence0[lToken[nLastToken]["nEnd"]:], ', sCode) # after0(sCode)
︙
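These substitutions run at build time: a condition written in the graph-rule syntax is rewritten, pattern by pattern, into Python source that indexes into `lToken`. As a minimal sketch of what the new lines do (the four patterns are copied verbatim from above; `convert_condition` is a hypothetical wrapper, not the compiler's actual entry point):

    import re

    def convert_condition (sCode):
        "hypothetical wrapper: apply the four new substitutions, in the same order as above"
        sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(>1", 'g_\\1(lToken[nLastToken+1]', sCode)  # next token
        sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(<1", 'g_\\1(lToken[nTokenOffset]', sCode)  # previous token
        sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', sCode)  # nth next token
        sCode = re.sub(r"\b(morph0?|tag|meta|value|info)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', sCode)  # nth previous token
        return sCode

    print(convert_condition('info(>1)'))              # g_info(lToken[nLastToken+1])
    print(convert_condition('value(<1, "|ne|n’|")'))  # g_value(lToken[nTokenOffset], "|ne|n’|")
    print(convert_condition('morph(>2)'))             # g_morph(g_token(lToken, nLastToken+2))

Note that `>1`/`<1` index `lToken` directly while larger offsets go through `g_token`, presumably because the tokens immediately adjacent to the matched span are always available, whereas farther offsets may fall outside the sentence and need bounds handling.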
Modified gc_core/py/lang_core/gc_functions.py from [9e52227dd2] to [8d220fa67a].
︙
    return False
    # search sPattern
    zPattern = re.compile(sPattern)
    return any(zPattern.search(sMorph) for sMorph in lMorph)


#### Analyse tokens for graph rules

def g_info (dToken):
    "for debugging: retrieve info of word"
    if not dToken:
        echo("> no token")
        return True
    lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
    if not lMorph:
        echo("> not in dictionary: " + dToken["sValue"])
        return True
    for sKey, val in dToken.items():
        echo(sKey + ":" + str(val))
    return True


def g_value (dToken, sValues, nLeft=None, nRight=None):
    "test if <dToken['sValue']> is in sValues (each value should be separated with |)"
    sValue = "|"+dToken["sValue"]+"|" if nLeft is None else "|"+dToken["sValue"][slice(nLeft, nRight)]+"|"
    if sValue in sValues:
        return True
    if dToken["sValue"][0:2].istitle(): # we test only the first 2 chars, to keep words such as "Laissez-les", "Passe-partout" valid.
︙
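For context, `g_info` is the debugging helper this check-in is named for: wired into the substitutions above, a condition such as `info(>1)` now compiles to a `g_info(...)` call that dumps a token's fields and returns True in every branch, which suggests it is meant to be and-ed into an existing condition without changing its outcome. A minimal self-contained sketch of the behavior, with hypothetical stand-ins for the module globals (`echo` and `_oSpellChecker` in the real file):

    echo = print    # stand-in for Grammalecte's echo helper

    class _SpellCheckerStub:
        "hypothetical stub: the real object looks the word up in the dictionary"
        def getMorph (self, sWord):
            return []

    _oSpellChecker = _SpellCheckerStub()

    def g_info (dToken):
        "for debugging: retrieve info of word"
        if not dToken:
            echo("> no token")
            return True
        lMorph = dToken["lMorph"] if "lMorph" in dToken else _oSpellChecker.getMorph(dToken["sValue"])
        if not lMorph:
            echo("> not in dictionary: " + dToken["sValue"])
            return True
        for sKey, val in dToken.items():
            echo(sKey + ":" + str(val))
        return True

    # a hypothetical token dict, shaped like the ones the tokenizer builds
    g_info({ "sValue": "chats", "nStart": 4, "nEnd": 9, "lMorph": [">chat/:N:m:p"] })
    # prints one "key:value" line per field and returns True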