Overview
| Comment: | [core] ibdawg: use SuggResult object + code cleaning |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | core | spellsugg |
| Files: | files | file ages | folders |
| SHA3-256: | 6c8c1776f596c8e052f5bf41bed27c38 |
| User & Date: | olr on 2017-11-07 17:03:03 |
| Other Links: | branch diff | manifest | tags |
Context
2017-11-07
| 17:39 | [core] ibdawg: suggest > DamerauLevenshtein seems better than Sift4 check-in: 5c78444baa user: olr tags: core, spellsugg | |
| 17:03 | [core] ibdawg: use SuggResult object + code cleaning check-in: 6c8c1776f5 user: olr tags: core, spellsugg | |
| 11:56 | [core] ibdawg: another suggestion method check-in: 1edae62ad8 user: olr tags: core, spellsugg | |
Changes
Modified gc_core/py/char_player.py from [19df5311c8] to [aea8dd1016].
| ︙ | ︙ | |||
    'œ': 'oe', 'æ': 'ae',
})

def cleanWord (sWord):
    "word simplication before calculating distance between words"
    return sWord.lower().translate(_xTransChars).replace("eau", "o").replace("au", "o")

[removed lines not shown in this hunk]

# Similar chars
d1to1 = {
    "1": "liîLIÎ",
    "2": "zZ",
    "3": "eéèêEÉÈÊ",
| ︙ | ︙ |
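For context, cleanWord() lowercases a word, strips diacritics via the module's _xTransChars table (defined earlier in char_player.py, not shown in this hunk), and collapses "eau"/"au" to "o" before any distance is computed. A minimal, self-contained sketch of that behaviour, assuming a reduced translation table instead of the real _xTransChars:

# Illustrative sketch only: a reduced stand-in for _xTransChars.
_xTransChars = str.maketrans({
    'à': 'a', 'â': 'a', 'é': 'e', 'è': 'e', 'ê': 'e',
    'î': 'i', 'ô': 'o', 'û': 'u', 'ç': 'c', 'œ': 'oe', 'æ': 'ae',
})

def cleanWord (sWord):
    "word simplification before calculating distance between words"
    return sWord.lower().translate(_xTransChars).replace("eau", "o").replace("au", "o")

print(cleanWord("Châteaux"))   # -> "chatox": lowercased, de-accented, "eau" collapsed to "o"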
Modified gc_core/py/ibdawg.py from [b7c1b974f3] to [6eb071b95f].
| ︙ | ︙ | |||
        fStart = time.time()
        result = func(*args, **kwargs)
        fEnd = time.time()
        print(func.__name__, fEnd - fStart)
        return result
    return wrapper


class SuggResult:

    def __init__ (self, sWord, nDistLimit=-1):
        self.sWord = sWord
        self.sCleanWord = cp.cleanWord(sWord)
        self.nDistLimit = nDistLimit if nDistLimit >= 0 else (len(sWord) // 3) + 1
        self.nMinDist = 1000
        self.nMaxDist = 0
        self.aSugg = set()
        self.dSugg = { 0: [], 1: [], 2: [] }

    def addSugg (self, sSugg, nDeep=0):
        "add a suggestion"
        if sSugg not in self.aSugg:
            nDist = st.distanceSift4(self.sCleanWord, cp.cleanWord(sSugg))
            if nDist <= self.nDistLimit:
                if nDist not in self.dSugg:
                    self.dSugg[nDist] = []
                self.dSugg[nDist].append(sSugg)
                logging.info((nDeep * " ") + "__" + sSugg + "__")
                if nDist > self.nMaxDist:
                    self.nMaxDist = nDist
                if nDist < self.nMinDist:
                    self.nMinDist = nDist
                self.nDistLimit = min(self.nDistLimit, self.nMinDist+2)

    def getSuggestions (self, nSuggLimit=10, nDistLimit=-1):
        "return a list of suggestions"
        lRes = []
        for lSugg in self.dSugg.values():
            lRes.extend(lSugg)
            if len(lRes) > nSuggLimit:
                break
        lRes = list(cp.filterSugg(lRes))
        if self.sWord.istitle():
            lRes = list(map(lambda sSugg: sSugg.title(), lRes))
        return lRes[:nSuggLimit]


class IBDAWG:
    """INDEXABLE BINARY DIRECT ACYCLIC WORD GRAPH"""

    def __init__ (self, sDicName):
        self.by = pkgutil.get_data(__package__, "_dictionaries/" + sDicName)
        if not self.by:
| ︙ | ︙ | |||
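In short, the new SuggResult object collects candidates via addSugg(), buckets them by Sift4 distance from the cleaned input word, tightens its own distance limit as closer matches arrive, and hands back the nearest candidates via getSuggestions(). A simplified, self-contained sketch of that contract (the real class relies on cp.cleanWord, cp.filterSugg and st.distanceSift4; difflib and the numeric bucket sort below are stand-ins):

# Simplified sketch of the SuggResult contract, for illustration only.
import difflib

def _dist (s1, s2):
    # stand-in for st.distanceSift4
    return round((1 - difflib.SequenceMatcher(None, s1, s2).ratio()) * max(len(s1), len(s2)))

class SuggResultSketch:
    def __init__ (self, sWord, nDistLimit=-1):
        self.sWord = sWord
        self.nDistLimit = nDistLimit if nDistLimit >= 0 else (len(sWord) // 3) + 1
        self.dSugg = {}                      # distance -> list of suggestions

    def addSugg (self, sSugg):
        nDist = _dist(self.sWord, sSugg)
        if nDist <= self.nDistLimit:
            self.dSugg.setdefault(nDist, []).append(sSugg)

    def getSuggestions (self, nSuggLimit=10):
        lRes = []
        for nDist in sorted(self.dSugg):     # nearest buckets first (simplified ordering)
            lRes.extend(self.dSugg[nDist])
        return lRes[:nSuggLimit]

oRes = SuggResultSketch("bonjuor")
for s in ("bonjour", "bonsoir", "bonjours"):
    oRes.addSugg(s)
print(oRes.getSuggestions())                 # closest candidates come first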
        if sWord.istitle():
            aSugg.update(self._suggest(sWord.lower(), nMaxDel=nMaxDel, nMaxHardRepl=nMaxHardRepl))
            aSugg = set(map(lambda sSugg: sSugg.title(), aSugg))
        elif sWord.islower():
            aSugg.update(self._suggest(sWord.title(), nMaxDel=nMaxDel, nMaxHardRepl=nMaxHardRepl))
        aSugg = cp.filterSugg(aSugg)
        sCleanWord = cp.cleanWord(sWord)
        aSugg = sorted(aSugg, key=lambda sSugg: st.distanceDamerauLevenshtein(sCleanWord, cp.cleanWord(sSugg)))[:nMaxSugg]
        if sSfx or sPfx:
            # we add what we removed
            return list(map(lambda sSug: sPfx + sSug + sSfx, aSugg))
        return aSugg

    def _suggest (self, sRemain, nMaxDel=0, nMaxHardRepl=0, nDeep=0, iAddr=0, sNewWord="", sAction="", bAvoidLoop=False):
        "returns a set of suggestions"
| ︙ | ︙ | |||
                aSugg.update(self._suggest(sRepl, nMaxDel, nMaxHardRepl, nDeep+1, iAddr, sNewWord, sRemain + " >> " + sRepl, True))
        return aSugg

    @timethis
    def suggest2 (self, sWord, nMaxSugg=10):
        "returns a set of suggestions for <sWord>"
        sPfx, sWord, sSfx = cp.cut(sWord)
        oSuggResult = SuggResult(sWord)
        self._suggest2(oSuggResult)
        aSugg = oSuggResult.getSuggestions()
        if sSfx or sPfx:
            # we add what we removed
            return list(map(lambda sSug: sPfx + sSug + sSfx, aSugg))
        return aSugg

    def _suggest2 (self, oSuggResult, nDeep=0, iAddr=0, sNewWord=""):
        #logging.info((nDeep * " ") + sNewWord)
        if nDeep >= oSuggResult.nDistLimit:
            sCleanNewWord = cp.cleanWord(sNewWord)
            if st.distanceSift4(oSuggResult.sCleanWord[:len(sCleanNewWord)], sCleanNewWord) > oSuggResult.nDistLimit:
                return
        if int.from_bytes(self.byDic[iAddr:iAddr+self.nBytesArc], byteorder='big') & self._finalNodeMask:
            #logging.info((nDeep * " ") + "__" + sNewWord + "__")
            oSuggResult.addSugg(sNewWord, nDeep)
        for cChar, jAddr in self._getCharArcs(iAddr):
            self._suggest2(oSuggResult, nDeep+1, jAddr, sNewWord+cChar)
        return

    def _getCharArcs (self, iAddr):
        "generator: yield all chars and addresses from node at address <iAddr>"
        for nVal, jAddr in self._getArcs(iAddr):
            if nVal < self.nChar:
                yield (self.dCharVal[nVal], jAddr)
| ︙ | ︙ |
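The reworked suggest2() entry point now builds a SuggResult, walks the word graph recursively with _suggest2() and _getCharArcs(), and returns the collected suggestions; since it is wrapped with @timethis, it also prints its execution time. A hedged usage sketch follows; the import path and dictionary file name are assumptions about the built package layout, not taken from this check-in:

# Hedged usage sketch (not part of the check-in).
from grammalecte.ibdawg import IBDAWG   # import path is an assumption about the built package

oDAWG = IBDAWG("fr.bdic")               # hypothetical name of a dictionary packaged under _dictionaries/
print(oDAWG.suggest("bonjuor"))         # existing suggestion method
print(oDAWG.suggest2("bonjuor"))        # new SuggResult-based method; @timethis also prints its runtime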
Modified gc_core/py/str_transform.py from [23bc31f0e4] to [3d25d1270b].
#!python3

#### DISTANCE CALCULATIONS

def longestCommonSubstring (s1, s2):
    # http://en.wikipedia.org/wiki/Longest_common_substring_problem
    # http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Longest_common_substring
    M = [ [0]*(1+len(s2)) for i in range(1+len(s1)) ]
    longest, x_longest = 0, 0
    for x in range(1, 1+len(s1)):
        for y in range(1, 1+len(s2)):
            if s1[x-1] == s2[y-1]:
                M[x][y] = M[x-1][y-1] + 1
                if M[x][y] > longest:
                    longest = M[x][y]
                    x_longest = x
            else:
                M[x][y] = 0
    return s1[x_longest-longest : x_longest]


def distanceDamerauLevenshtein (s1, s2):
    "distance of Damerau-Levenshtein between <s1> and <s2>"
    # https://fr.wikipedia.org/wiki/Distance_de_Damerau-Levenshtein
    d = {}
    nLen1 = len(s1)
    nLen2 = len(s2)
    for i in range(-1, nLen1+1):
        d[i, -1] = i + 1
    for j in range(-1, nLen2+1):
        d[-1, j] = j + 1
    for i in range(nLen1):
        for j in range(nLen2):
            nCost = 0 if s1[i] == s2[j] else 1
            d[i, j] = min(
                d[i-1, j] + 1,        # Deletion
                d[i, j-1] + 1,        # Insertion
                d[i-1, j-1] + nCost,  # Substitution
            )
            if i and j and s1[i] == s2[j-1] and s1[i-1] == s2[j]:
                d[i, j] = min(d[i, j], d[i-2, j-2] + nCost)  # Transposition
    return d[nLen1-1, nLen2-1]


def distanceSift4 (s1, s2, nMaxOffset=5):
    "implementation of general Sift4."
    # https://siderite.blogspot.com/2014/11/super-fast-and-accurate-string-distance.html
    if not s1:
        return len(s2)
    if not s2:
        return len(s1)
    nLen1, nLen2 = len(s1), len(s2)
    i1, i2 = 0, 0   # Cursors for each string
    nLargestCS = 0  # Largest common substring
    nLocalCS = 0    # Local common substring
    nTrans = 0      # Number of transpositions ('ab' vs 'ba')
    lOffset = []    # Offset pair array, for computing the transpositions
    while i1 < nLen1 and i2 < nLen2:
        if s1[i1] == s2[i2]:
            nLocalCS += 1
            # Check if current match is a transposition
            bTrans = False
            i = 0
            while i < len(lOffset):
                t = lOffset[i]
                if i1 <= t[0] or i2 <= t[1]:
                    bTrans = abs(i2-i1) >= abs(t[1] - t[0])
                    if bTrans:
                        nTrans += 1
                    elif not t[2]:
                        t[2] = True
                        nTrans += 1
                    break
                elif i1 > t[1] and i2 > t[0]:
                    del lOffset[i]
                else:
                    i += 1
            lOffset.append([i1, i2, bTrans])
        else:
            nLargestCS += nLocalCS
            nLocalCS = 0
            if i1 != i2:
                i1 = i2 = min(i1, i2)
            for i in range(nMaxOffset):
                if i1 + i >= nLen1 and i2 + i >= nLen2:
                    break
                elif i1 + i < nLen1 and s1[i1+i] == s2[i2]:
                    i1 += i - 1
                    i2 -= 1
                    break
                elif i2 + i < nLen2 and s1[i1] == s2[i2+i]:
                    i2 += i - 1
                    i1 -= 1
                    break
        i1 += 1
        i2 += 1
        if i1 >= nLen1 or i2 >= nLen2:
            nLargestCS += nLocalCS
            nLocalCS = 0
            i1 = i2 = min(i1, i2)
    nLargestCS += nLocalCS
    return round(max(nLen1, nLen2) - nLargestCS + nTrans)


def showDistance (s1, s2):
    print(f"Damerau-Levenshtein: {s1} / {s2} = {distanceDamerauLevenshtein(s1, s2)}")
    print(f"Sift4: {s1} / {s2} = {distanceSift4(s1, s2)}")


#### STEMMING OPERATIONS

## No stemming

def noStemming (sFlex, sStem):
    return sStem

def rebuildWord (sFlex, cmd1, cmd2):
    if cmd1 == "_":
| ︙ | ︙ | |||
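A quick illustration of the two metrics now hosted in str_transform.py; this is a sketch assuming the module is importable on its own (it has no internal dependencies), not something shipped with the check-in:

# Illustrative sanity check of the distance helpers above.
import str_transform as st   # assumption: str_transform.py is on the import path

print(st.distanceDamerauLevenshtein("bonjour", "bonjuor"))   # one transposition -> 1
print(st.distanceSift4("bonjour", "bonjuor"))                # Sift4 score for the same pair
st.showDistance("château", "chateaux")                       # prints both metrics side by side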
        return "0"
    jSfx = 0
    for i in range(min(len(sFlex), len(sStem))):
        if sFlex[i] != sStem[i]:
            break
        jSfx += 1
    return chr(len(sFlex)-jSfx+48) + sStem[jSfx:]


def changeWordWithSuffixCode (sWord, sSfxCode):
    if sSfxCode == "0":
        return sWord
    return sWord[:-(ord(sSfxCode[0])-48)] + sSfxCode[1:] if sSfxCode[0] != '0' else sWord + sSfxCode[1:]


# Prefix and suffix
def defineAffixCode (sFlex, sStem):
    """ Returns a string defining how to get stem from flexion. Examples:
            "0" if stem = flexion
            "stem" if no common substring
            "n(pfx)/m(sfx)"
        with n and m: chars with numeric meaning, "0" = 0, "1" = 1, ... ":" = 10, etc. (See ASCII table.) Says how many letters to strip from flexion.
            pfx [optional]: string to add before the flexion
| ︙ | ︙ | |||
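A worked example of the suffix-code round trip visible in this hunk: the first character of the code says how many trailing characters to strip (as chr(n+48)), and the rest is appended. The function below is copied from the hunk; the sample words are chosen for illustration.

def changeWordWithSuffixCode (sWord, sSfxCode):
    if sSfxCode == "0":
        return sWord
    return sWord[:-(ord(sSfxCode[0])-48)] + sSfxCode[1:] if sSfxCode[0] != '0' else sWord + sSfxCode[1:]

# "2l" means: strip ord("2")-48 = 2 trailing chars, then append "l".
print(changeWordWithSuffixCode("chevaux", "2l"))   # -> "cheval"
print(changeWordWithSuffixCode("cheval", "0"))     # -> "cheval" (code "0": stem == flexion)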
            n = sFlex.find(sSubs)
            m = len(sFlex) - (len(sSubs)+n)
            sAff = "{}/".format(chr(n+48)) if not sPfx else "{}{}/".format(chr(n+48), sPfx)
            sAff += chr(m+48) if not sSfx else "{}{}".format(chr(m+48), sSfx)
            return sAff
    return sStem

[removed lines not shown in this hunk]

def changeWordWithAffixCode (sWord, sAffCode):
    if sAffCode == "0":
        return sWord
    if '/' not in sAffCode:
        return "# error #"
    sPfxCode, sSfxCode = sAffCode.split('/')
    sWord = sPfxCode[1:] + sWord[(ord(sPfxCode[0])-48):]
    return sWord[:-(ord(sSfxCode[0])-48)] + sSfxCode[1:] if sSfxCode[0] != '0' else sWord + sSfxCode[1:]
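And a worked example of the combined "n(pfx)/m(sfx)" decoder shown above: the part before "/" strips leading characters and prepends a prefix, the part after strips trailing characters and appends a suffix. The function is copied from the hunk; the codes below are hand-built for illustration.

def changeWordWithAffixCode (sWord, sAffCode):
    if sAffCode == "0":
        return sWord
    if '/' not in sAffCode:
        return "# error #"
    sPfxCode, sSfxCode = sAffCode.split('/')
    sWord = sPfxCode[1:] + sWord[(ord(sPfxCode[0])-48):]
    return sWord[:-(ord(sSfxCode[0])-48)] + sSfxCode[1:] if sSfxCode[0] != '0' else sWord + sSfxCode[1:]

print(changeWordWithAffixCode("refaire", "2dé/0"))   # strip 2 leading chars, prepend "dé" -> "défaire"
print(changeWordWithAffixCode("chevaux", "0/2l"))    # no prefix change, strip 2, append "l" -> "cheval"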