# […]

# ----- old version -----

def readFile (spf):
    print(" < Read lexicon: " + spf)
    if os.path.isfile(spf):
        with open(spf, "r", encoding="utf-8") as hSrc:
            for sLine in hSrc:
                sLine = sLine.strip()
                if sLine and not sLine.startswith("#"):
                    yield sLine
    else:
        raise OSError("# Error. File not found or not loadable: " + spf)
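
# A lexicon file is expected to hold one tab-separated entry per line:
# flexion, stem, tags; e.g. (hypothetical data) "mangées\tmanger\tV f p".
# readFile() yields each such line as a raw string, skipping blank lines and comments.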


class DAWG:
    """DIRECTED ACYCLIC WORD GRAPH"""
    # This code is inspired by Steve Hanov’s DAWG, 2011 (http://stevehanov.ca/blog/index.php?id=115).
    # We store suffix/affix codes and tags within the graph after the “real” word.
    # A word is a list of numbers [ c1, c2, c3 … cN, iAffix, iTags ].
    # Each arc is an index in self.lArcVal, where characters, suffix/affix codes for stemming and tags are stored.
    # Important: as usual, the last node (after ‘iTags’) is tagged final, AND the node after ‘cN’ is ALSO tagged final.

    def __init__ (self, spfSrc, cStemming, sLangCode, sLangName="", sDicName=""):
        print("===== Directed Acyclic Word Graph - Minimal Acyclic Finite State Automaton =====")
        cStemming = cStemming.upper()
        if cStemming == "A":
            funcStemmingGen = st.defineAffixCode
        elif cStemming == "S":
            funcStemmingGen = st.defineSuffixCode
        elif cStemming == "N":
            funcStemmingGen = st.noStemming
        else:
            raise ValueError("# Error. Unknown stemming code: {}".format(cStemming))
        lEntry = []
        lChar = ['']; dChar = {}; nChar = 1; dCharOccur = {}
        lAff = []; dAff = {}; nAff = 0; dAffOccur = {}
        lTag = []; dTag = {}; nTag = 0; dTagOccur = {}
        nErr = 0
        # read the lexicon
        for sLine in readFile(spfSrc):
            sFlex, sStem, sTag = sLine.split("\t")
            addWordToCharDict(sFlex)
            # chars
            for c in sFlex:
                if c not in dChar:
                    dChar[c] = nChar
                    lChar.append(c)
                    nChar += 1
                dCharOccur[c] = dCharOccur.get(c, 0) + 1
            # affixes, to find the stem from the flexion
            aff = funcStemmingGen(sFlex, sStem)
            if aff not in dAff:
                dAff[aff] = nAff
                lAff.append(aff)
                nAff += 1
            dAffOccur[aff] = dAffOccur.get(aff, 0) + 1
            # tags
            if sTag not in dTag:
                dTag[sTag] = nTag
                lTag.append(sTag)
                nTag += 1
            dTagOccur[sTag] = dTagOccur.get(sTag, 0) + 1
            lEntry.append((sFlex, dAff[aff], dTag[sTag]))
        if not lEntry:
            raise ValueError("# Error. Empty lexicon")
        # prepare the DAWG
        print(" > Preparing list of words")
        lVal = lChar + lAff + lTag
        lWord = [ [dChar[c] for c in sFlex] + [iAff+nChar] + [iTag+nChar+nAff]  for sFlex, iAff, iTag in lEntry ]
        lEntry = None
        # dictionary of arc value occurrences, used to sort the arcs of each node
        dValOccur = dict( [ (dChar[c], dCharOccur[c])  for c in dChar ] \
                        + [ (dAff[aff]+nChar, dAffOccur[aff])  for aff in dAff ] \
                        + [ (dTag[tag]+nChar+nAff, dTagOccur[tag])  for tag in dTag ] )
        #with open(spfSrc[:-8]+".valuesfreq.txt", 'w', encoding='utf-8') as hFreqDst:  # DEBUG
        #    for iKey, nOcc in sorted(dValOccur.items(), key=lambda t: t[1], reverse=True):
        #        hFreqDst.write("{}: {}\n".format(lVal[iKey], nOcc))
        self.sFileName = spfSrc
        self.sLangCode = sLangCode
        self.sLangName = sLangName
        self.sDicName = sDicName
        self.nEntry = len(lWord)
        self.aPreviousEntry = []
        DawgNode.resetNextId()
        self.oRoot = DawgNode()

# ----- new version -----

def readFile (spf):
    print(" < Read lexicon: " + spf)
    if os.path.isfile(spf):
        with open(spf, "r", encoding="utf-8") as hSrc:
            for sLine in hSrc:
                sLine = sLine.strip()
                if sLine and not sLine.startswith("#"):
                    yield sLine.split("\t")
    else:
        raise OSError("# Error. File not found or not loadable: " + spf)
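
# Here readFile() yields each entry already split into a [flexion, stem, tags]
# list; e.g. (hypothetical data) the line "mangées\tmanger\tV f p" yields
# ["mangées", "manger", "V f p"].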


class DAWG:
    """DIRECTED ACYCLIC WORD GRAPH"""
    # This code is inspired by Steve Hanov’s DAWG, 2011 (http://stevehanov.ca/blog/index.php?id=115).
    # We store suffix/affix codes and tags within the graph after the “real” word.
    # A word is a list of numbers [ c1, c2, c3 … cN, iAffix, iTags ].
    # Each arc is an index in self.lArcVal, where characters, suffix/affix codes for stemming and tags are stored.
    # Important: as usual, the last node (after ‘iTags’) is tagged final, AND the node after ‘cN’ is ALSO tagged final.

    def __init__ (self, src, cStemming, sLangCode, sLangName="", sDicName=""):
        print("===== Directed Acyclic Word Graph - Minimal Acyclic Finite State Automaton =====")
        cStemming = cStemming.upper()
        if cStemming == "A":
            funcStemmingGen = st.defineAffixCode
        elif cStemming == "S":
            funcStemmingGen = st.defineSuffixCode
        elif cStemming == "N":
            funcStemmingGen = st.noStemming
        else:
            raise ValueError("# Error. Unknown stemming code: {}".format(cStemming))
        aEntry = set()
        lChar = ['']; dChar = {}; nChar = 1; dCharOccur = {}
        lAff = []; dAff = {}; nAff = 0; dAffOccur = {}
        lTag = []; dTag = {}; nTag = 0; dTagOccur = {}
        nErr = 0
        # read the lexicon
        if type(src) is str:
            iterable = readFile(src)
        else:
            iterable = src
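        # `src` may thus be either the path of a tab-separated lexicon file or any
        # iterable of (flexion, stem, tags) triples, e.g. (hypothetical usage):
        # DAWG("fr.lex", "S", "fr") or DAWG([("bananas", "banana", "NN")], "S", "en")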
        for sFlex, sStem, sTag in iterable:
            addWordToCharDict(sFlex)
            # chars
            for c in sFlex:
                if c not in dChar:
                    dChar[c] = nChar
                    lChar.append(c)
                    nChar += 1
                dCharOccur[c] = dCharOccur.get(c, 0) + 1
            # affixes, to find the stem from the flexion
            sAff = funcStemmingGen(sFlex, sStem)
            if sAff not in dAff:
                dAff[sAff] = nAff
                lAff.append(sAff)
                nAff += 1
            dAffOccur[sAff] = dAffOccur.get(sAff, 0) + 1
            # tags
            if sTag not in dTag:
                dTag[sTag] = nTag
                lTag.append(sTag)
                nTag += 1
            dTagOccur[sTag] = dTagOccur.get(sTag, 0) + 1
            aEntry.add((sFlex, dAff[sAff], dTag[sTag]))
        if not aEntry:
            raise ValueError("# Error. Empty lexicon")
        # prepare the DAWG
        print(" > Preparing list of words")
        lVal = lChar + lAff + lTag
        lWord = [ [dChar[c] for c in sFlex] + [iAff+nChar] + [iTag+nChar+nAff]  for sFlex, iAff, iTag in aEntry ]
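        # For example (hypothetical indices): with nChar = 30 and nAff = 10, the entry
        # ("me", 3, 5) becomes [dChar['m'], dChar['e'], 33, 45]: the character indices,
        # then the affix index offset by nChar, then the tag index offset by nChar + nAff.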
        aEntry = None
        # dictionary of arc value occurrences, used to sort the arcs of each node
        dValOccur = dict( [ (dChar[c], dCharOccur[c])  for c in dChar ] \
                        + [ (dAff[aff]+nChar, dAffOccur[aff])  for aff in dAff ] \
                        + [ (dTag[tag]+nChar+nAff, dTagOccur[tag])  for tag in dTag ] )
        self.sFileName = src  if type(src) is str  else "[None]"
        self.sLangCode = sLangCode
        self.sLangName = sLangName
        self.sDicName = sDicName
        self.nEntry = len(lWord)
        self.aPreviousEntry = []
        DawgNode.resetNextId()
        self.oRoot = DawgNode()

# […]

# ----- old version -----

                    sEntry = sWord + "\t" + self.funcStemming(sWord, self.lArcVal[nVal])
                    for nMorphVal, _ in oNextNode.arcs.items():
                        if not zPattern or zPattern.search(self.lArcVal[nMorphVal]):
                            yield sEntry + "\t" + self.lArcVal[nMorphVal]

    # BINARY CONVERSION
    def createBinary (self, sPathFile, nCompressionMethod, bDebug=False):
        print(" > Write DAWG as an indexable binary dictionary [method: %d]" % nCompressionMethod)
        if nCompressionMethod == 1:
            self.nBytesArc = ( (self.nArcVal.bit_length() + 2) // 8 ) + 1     # we add 2 bits; see DawgNode.convToBytes1()
            self.nBytesOffset = 0
            self._calcNumBytesNodeAddress()
            self._calcNodesAddress1()
        elif nCompressionMethod == 2:

# ----- new version -----

                    sEntry = sWord + "\t" + self.funcStemming(sWord, self.lArcVal[nVal])
                    for nMorphVal, _ in oNextNode.arcs.items():
                        if not zPattern or zPattern.search(self.lArcVal[nMorphVal]):
                            yield sEntry + "\t" + self.lArcVal[nMorphVal]

    # BINARY CONVERSION
    def _calculateBinary (self, nCompressionMethod):
        print(" > Write DAWG as an indexable binary dictionary [method: %d]" % nCompressionMethod)
        if nCompressionMethod == 1:
            self.nBytesArc = ( (self.nArcVal.bit_length() + 2) // 8 ) + 1     # we add 2 bits; see DawgNode.convToBytes1()
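            # e.g. (hypothetical figures) with nArcVal = 5000: bit_length() is 13,
            # plus 2 flag bits makes 15, and 15 // 8 + 1 gives 2 bytes per arc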
            self.nBytesOffset = 0
            self._calcNumBytesNodeAddress()
            self._calcNodesAddress1()
        elif nCompressionMethod == 2:

# […]

# ----- old version -----

            self._calcNodesAddress3()
        else:
            print(" # Error: unknown compression method")
        print(" Arc values (chars, affixes and tags): {} -> {} bytes".format( self.nArcVal, len("\t".join(self.lArcVal).encode("utf-8")) ))
        print(" Arc size: {} bytes, Address size: {} bytes -> {} * {} = {} bytes".format( self.nBytesArc, self.nBytesNodeAddress, \
                                                                                          self.nBytesArc+self.nBytesNodeAddress, self.nArc, \
                                                                                          (self.nBytesArc+self.nBytesNodeAddress)*self.nArc ))
        self._writeBinary(sPathFile, nCompressionMethod)
        self._writeAsJSObject(sPathFile, nCompressionMethod)
        if bDebug:
            self._writeNodes(sPathFile, nCompressionMethod)

    def _calcNumBytesNodeAddress (self):
        "how many bytes are needed to store all nodes/arcs in the binary dictionary"
        self.nBytesNodeAddress = 1
        while ((self.nBytesArc + self.nBytesNodeAddress) * self.nArc) > (2 ** (self.nBytesNodeAddress * 8)):
            self.nBytesNodeAddress += 1

# ----- new version -----

            self._calcNodesAddress3()
        else:
            print(" # Error: unknown compression method")
        print(" Arc values (chars, affixes and tags): {} -> {} bytes".format( self.nArcVal, len("\t".join(self.lArcVal).encode("utf-8")) ))
        print(" Arc size: {} bytes, Address size: {} bytes -> {} * {} = {} bytes".format( self.nBytesArc, self.nBytesNodeAddress, \
                                                                                          self.nBytesArc+self.nBytesNodeAddress, self.nArc, \
                                                                                          (self.nBytesArc+self.nBytesNodeAddress)*self.nArc ))

    def _calcNumBytesNodeAddress (self):
        "how many bytes are needed to store all nodes/arcs in the binary dictionary"
        self.nBytesNodeAddress = 1
        while ((self.nBytesArc + self.nBytesNodeAddress) * self.nArc) > (2 ** (self.nBytesNodeAddress * 8)):
            self.nBytesNodeAddress += 1
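        # e.g. (hypothetical figures) with nBytesArc = 3 and nArc = 100000:
        # 100000 * 4 > 256 and 100000 * 5 > 65536, but 100000 * 6 <= 16777216,
        # so nBytesNodeAddress ends up as 3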

# […]

# ----- old version -----

                for oNextNode in self.lSortedNodes[i].arcs.values():
                    if 1 < (oNextNode.addr - self.lSortedNodes[i].addr) < self.nMaxOffset:
                        nSize -= nDiff
                if self.lSortedNodes[i].size != nSize:
                    self.lSortedNodes[i].size = nSize
                    bEnd = False

    def _writeAsJSObject (self, spfDst, nCompressionMethod, bInJSModule=False, bBinaryDictAsHexString=True):
        if not spfDst.endswith(".json"):
            spfDst += "."+str(nCompressionMethod)+".json"
        byDic = b""
        if nCompressionMethod == 1:
            byDic = self.oRoot.convToBytes1(self.nBytesArc, self.nBytesNodeAddress)
            for oNode in self.lMinimizedNodes:
                byDic += oNode.convToBytes1(self.nBytesArc, self.nBytesNodeAddress)
        elif nCompressionMethod == 2:
            byDic = self.oRoot.convToBytes2(self.nBytesArc, self.nBytesNodeAddress)
            for oNode in self.lSortedNodes:
                byDic += oNode.convToBytes2(self.nBytesArc, self.nBytesNodeAddress)
        elif nCompressionMethod == 3:
            byDic = self.oRoot.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset)
            for oNode in self.lSortedNodes:
                byDic += oNode.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset)
        with open(spfDst, "w", encoding="utf-8", newline="\n") as hDst:
            if bInJSModule:
                hDst.write('// JavaScript\n// Generated data (do not edit)\n\n"use strict";\n\nconst dictionary = ')
            hDst.write(json.dumps({
                "sHeader": "/pyfsa/",
                "sLangCode": self.sLangCode,
                "sLangName": self.sLangName,
                "sDicName": self.sDicName,
                "sFileName": self.sFileName,
                "sDate": self._getDate(),
                "nEntry": self.nEntry,
                "nChar": self.nChar,
                "nAff": self.nAff,
                "nTag": self.nTag,
                "cStemming": self.cStemming,
                "dChar": self.dChar,
                "nNode": self.nNode,
                "nArc": self.nArc,
                "nArcVal": self.nArcVal,
                "lArcVal": self.lArcVal,
                "nCompressionMethod": nCompressionMethod,
                "nBytesArc": self.nBytesArc,
                "nBytesNodeAddress": self.nBytesNodeAddress,
                "nBytesOffset": self.nBytesOffset,
                # Mozilla’s JS parser doesn’t accept files bigger than 4 MB,
                # so, if necessary, we use a hexadecimal string that we convert back later in the Firefox extension.
                # https://github.com/mozilla/addons-linter/issues/1361
                "sByDic": byDic.hex()  if bBinaryDictAsHexString  else [ e  for e in byDic ]
            }, ensure_ascii=False))
            if bInJSModule:
                hDst.write(";\n\nexports.dictionary = dictionary;\n")

    def _writeBinary (self, sPathFile, nCompressionMethod):
        """
        Format of the binary indexable dictionary:
        Each section is separated by 4 bytes of \\0
        - Section Header:
            /pyfsa/[compression method]
            * the compression method is an ASCII string

# ----- new version -----

                for oNextNode in self.lSortedNodes[i].arcs.values():
                    if 1 < (oNextNode.addr - self.lSortedNodes[i].addr) < self.nMaxOffset:
                        nSize -= nDiff
                if self.lSortedNodes[i].size != nSize:
                    self.lSortedNodes[i].size = nSize
                    bEnd = False

    def getBinaryAsJSON (self, nCompressionMethod=1, bBinaryDictAsHexString=True):
        self._calculateBinary(nCompressionMethod)
        byDic = b""
        if nCompressionMethod == 1:
            byDic = self.oRoot.convToBytes1(self.nBytesArc, self.nBytesNodeAddress)
            for oNode in self.lMinimizedNodes:
                byDic += oNode.convToBytes1(self.nBytesArc, self.nBytesNodeAddress)
        elif nCompressionMethod == 2:
            byDic = self.oRoot.convToBytes2(self.nBytesArc, self.nBytesNodeAddress)
            for oNode in self.lSortedNodes:
                byDic += oNode.convToBytes2(self.nBytesArc, self.nBytesNodeAddress)
        elif nCompressionMethod == 3:
            byDic = self.oRoot.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset)
            for oNode in self.lSortedNodes:
                byDic += oNode.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset)
        return {
            "sHeader": "/pyfsa/",
            "sLangCode": self.sLangCode,
            "sLangName": self.sLangName,
            "sDicName": self.sDicName,
            "sFileName": self.sFileName,
            "sDate": self._getDate(),
            "nEntry": self.nEntry,
            "nChar": self.nChar,
            "nAff": self.nAff,
            "nTag": self.nTag,
            "cStemming": self.cStemming,
            "dChar": self.dChar,
            "nNode": self.nNode,
            "nArc": self.nArc,
            "nArcVal": self.nArcVal,
            "lArcVal": self.lArcVal,
            "nCompressionMethod": nCompressionMethod,
            "nBytesArc": self.nBytesArc,
            "nBytesNodeAddress": self.nBytesNodeAddress,
            "nBytesOffset": self.nBytesOffset,
            # Mozilla’s JS parser doesn’t accept files bigger than 4 MB,
            # so, if necessary, we use a hexadecimal string that we convert back later in the Firefox extension.
            # https://github.com/mozilla/addons-linter/issues/1361
            "sByDic": byDic.hex()  if bBinaryDictAsHexString  else [ e  for e in byDic ]
        }
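
    # NOTE: the hexadecimal form can be turned back into bytes with bytes.fromhex(),
    # e.g. (hypothetical usage): byDic = bytes.fromhex(oDAWG.getBinaryAsJSON(1)["sByDic"])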
    def writeAsJSObject (self, spfDst, nCompressionMethod, bInJSModule=False, bBinaryDictAsHexString=True):
        if not spfDst.endswith(".json"):
            spfDst += "."+str(nCompressionMethod)+".json"
        with open(spfDst, "w", encoding="utf-8", newline="\n") as hDst:
            if bInJSModule:
                hDst.write('// JavaScript\n// Generated data (do not edit)\n\n"use strict";\n\nconst dictionary = ')
            hDst.write( json.dumps(self.getBinaryAsJSON(nCompressionMethod, bBinaryDictAsHexString), ensure_ascii=False) )
            if bInJSModule:
                hDst.write(";\n\nexports.dictionary = dictionary;\n")
    def writeBinary (self, sPathFile, nCompressionMethod, bDebug=False):
        """
        Format of the binary indexable dictionary:
        Each section is separated by 4 bytes of \\0
        - Section Header:
            /pyfsa/[compression method]
            * the compression method is an ASCII string

# […]

# ----- old version -----

        - Section Values:
            * a list of UTF-8 encoded strings, each value separated by a tabulation
        - Section Word Graph (nodes / arcs):
            * a list of nodes, each a list of arcs with the address of the next node;
              see DawgNode.convToBytes() for details
        """
        if not sPathFile.endswith(".bdic"):
            sPathFile += "."+str(nCompressionMethod)+".bdic"
        with open(sPathFile, 'wb') as hDst:
            # header
            hDst.write("/pyfsa/{}/".format(nCompressionMethod).encode("utf-8"))
            hDst.write(b"\0\0\0\0")
            # infos

# ----- new version -----

        - Section Values:
            * a list of UTF-8 encoded strings, each value separated by a tabulation
        - Section Word Graph (nodes / arcs):
            * a list of nodes, each a list of arcs with the address of the next node;
              see DawgNode.convToBytes() for details
        """
        self._calculateBinary(nCompressionMethod)
        if not sPathFile.endswith(".bdic"):
            sPathFile += "."+str(nCompressionMethod)+".bdic"
        with open(sPathFile, 'wb') as hDst:
            # header
            hDst.write("/pyfsa/{}/".format(nCompressionMethod).encode("utf-8"))
            hDst.write(b"\0\0\0\0")
            # infos

# […]

# ----- old version -----

                hDst.write(self.oRoot.convToBytes2(self.nBytesArc, self.nBytesNodeAddress))
                for oNode in self.lSortedNodes:
                    hDst.write(oNode.convToBytes2(self.nBytesArc, self.nBytesNodeAddress))
            elif nCompressionMethod == 3:
                hDst.write(self.oRoot.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset))
                for oNode in self.lSortedNodes:
                    hDst.write(oNode.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset))
            hDst.close()

    def _getDate (self):
        return time.strftime("%Y.%m.%d, %H:%M")

    def _writeNodes (self, sPathFile, nCompressionMethod):
        "for debugging only"
        print(" > Write nodes")

# ----- new version -----

                hDst.write(self.oRoot.convToBytes2(self.nBytesArc, self.nBytesNodeAddress))
                for oNode in self.lSortedNodes:
                    hDst.write(oNode.convToBytes2(self.nBytesArc, self.nBytesNodeAddress))
            elif nCompressionMethod == 3:
                hDst.write(self.oRoot.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset))
                for oNode in self.lSortedNodes:
                    hDst.write(oNode.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset))
        if bDebug:
            self._writeNodes(sPathFile, nCompressionMethod)
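
    # Hypothetical usage: oDAWG.writeBinary("fr", 1) calculates the binary form and
    # writes "fr.1.bdic"; pass bDebug=True to also dump a textual node listing.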
    def _getDate (self):
        return time.strftime("%Y.%m.%d, %H:%M")

    def _writeNodes (self, sPathFile, nCompressionMethod):
        "for debugging only"
        print(" > Write nodes")

# […]

# ----- old version -----

            for oNode in self.lSortedNodes:
                hDst.write(oNode.getTxtRepr2(self.nBytesArc, self.nBytesNodeAddress, self.lArcVal)+"\n")
            if nCompressionMethod == 3:
                hDst.write(self.oRoot.getTxtRepr3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset, self.lArcVal)+"\n")
                #hDst.write( ''.join( [ "%02X " % z  for z in self.oRoot.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset) ] ).strip() )
                for oNode in self.lSortedNodes:
                    hDst.write(oNode.getTxtRepr3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset, self.lArcVal)+"\n")
            hDst.close()

    def writeResults (self, sPathFile):
        bFileExists = os.path.isfile("_lexicons.res.txt")
        with open("_lexicons.res.txt", "a", encoding='utf-8', newline="\n") as hDst:
            sFormat1 = "{:<12} {:>12} {:>5} {:>8} {:>8} {:>6} {:>8} {:>9} {:>9} {:>15} {:>12} {:>15}\n"
            sFormat2 = "{:<12} {:>12,} {:>5,} {:>8,} {:>8} {:>6,} {:>8,} {:>9,} {:>9,} {:>15,} {:>12,} {:>15,}\n"
            if not bFileExists:
                hDst.write(sFormat1.format("Lexicon", "Entries", "Chars", "Affixes", "Stemming", "Tags", "Values", "Nodes", "Arcs", "Lexicon (bytes)", "Dict (bytes)", "LT Dict (bytes)"))
            hDst.write(sFormat2.format(self.sLangName, self.nEntry, self.nChar, self.nAff, self.cStemming + "FX", self.nTag, self.nArcVal, \
                                       self.nNode, self.nArc, os.path.getsize(self.sFileName), os.path.getsize(sPathFile), \
                                       os.path.getsize("cfsa/dict/{}.dict".format(self.sLangName))  if os.path.isfile("cfsa/dict/{}.dict".format(self.sLangName))  else 0))
            hDst.close()
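
    # Hypothetical usage: oDAWG.writeResults("fr.1.bdic") appends one row of
    # lexicon/graph statistics to "_lexicons.res.txt", writing the header line
    # first if the file does not exist yet.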


class DawgNode:
    NextId = 0
    NextPos = 1   # (version 2)

# ----- new version -----

            for oNode in self.lSortedNodes:
                hDst.write(oNode.getTxtRepr2(self.nBytesArc, self.nBytesNodeAddress, self.lArcVal)+"\n")
            if nCompressionMethod == 3:
                hDst.write(self.oRoot.getTxtRepr3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset, self.lArcVal)+"\n")
                #hDst.write( ''.join( [ "%02X " % z  for z in self.oRoot.convToBytes3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset) ] ).strip() )
                for oNode in self.lSortedNodes:
                    hDst.write(oNode.getTxtRepr3(self.nBytesArc, self.nBytesNodeAddress, self.nBytesOffset, self.lArcVal)+"\n")


class DawgNode:
    NextId = 0
    NextPos = 1   # (version 2)

# […]