︙ | | | ︙ | |
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
|
s = "===== TEXT =====\n"
s += "sentence: " + self.sSentence0 + "\n"
s += "now: " + self.sSentence + "\n"
for dToken in self.lToken:
s += '#{i}\t{nStart}:{nEnd}\t{sValue}\t{sType}'.format(**dToken)
if "lMorph" in dToken:
s += "\t" + str(dToken["lMorph"])
if "tags" in dToken:
s += "\t" + str(dToken["tags"])
s += "\n"
#for nPos, dToken in self.dTokenPos.items():
# s += "{}\t{}\n".format(nPos, dToken)
return s
def parse (self, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False):
"analyses the paragraph sText and returns list of errors"
|
|
|
|
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
|
s = "===== TEXT =====\n"
s += "sentence: " + self.sSentence0 + "\n"
s += "now: " + self.sSentence + "\n"
for dToken in self.lToken:
s += '#{i}\t{nStart}:{nEnd}\t{sValue}\t{sType}'.format(**dToken)
if "lMorph" in dToken:
s += "\t" + str(dToken["lMorph"])
if "aTags" in dToken:
s += "\t" + str(dToken["aTags"])
s += "\n"
#for nPos, dToken in self.dTokenPos.items():
# s += "{}\t{}\n".format(nPos, dToken)
return s
def parse (self, sCountry="${country_default}", bDebug=False, dOptions=None, bContext=False):
"analyses the paragraph sText and returns list of errors"
|
︙ | | | ︙ | |
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
|
def update (self, sSentence, bDebug=False):
"update <sSentence> and retokenize"
self.sSentence = sSentence
lNewToken = list(_oTokenizer.genTokens(sSentence, True))
for dToken in lNewToken:
if "lMorph" in self.dTokenPos.get(dToken["nStart"], {}):
dToken["lMorph"] = self.dTokenPos[dToken["nStart"]]["lMorph"]
if "tags" in self.dTokenPos.get(dToken["nStart"], {}):
dToken["tags"] = self.dTokenPos[dToken["nStart"]]["tags"]
self.lToken = lNewToken
self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lToken if dToken["sType"] != "INFO" }
if bDebug:
echo("UPDATE:")
echo(self)
def _getNextPointers (self, dToken, dGraph, dPointer, bDebug=False):
|
|
|
|
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
|
def update (self, sSentence, bDebug=False):
"update <sSentence> and retokenize"
self.sSentence = sSentence
lNewToken = list(_oTokenizer.genTokens(sSentence, True))
for dToken in lNewToken:
if "lMorph" in self.dTokenPos.get(dToken["nStart"], {}):
dToken["lMorph"] = self.dTokenPos[dToken["nStart"]]["lMorph"]
if "aTags" in self.dTokenPos.get(dToken["nStart"], {}):
dToken["aTags"] = self.dTokenPos[dToken["nStart"]]["aTags"]
self.lToken = lNewToken
self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lToken if dToken["sType"] != "INFO" }
if bDebug:
echo("UPDATE:")
echo(self)
def _getNextPointers (self, dToken, dGraph, dPointer, bDebug=False):
|
︙ | | | ︙ | |
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
|
continue
if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph):
if bDebug:
echo(" MATCH: @" + sRegex)
yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_morph>"][sRegex]] }
bTokenFound = True
# token tags
if "tags" in dToken and "<tags>" in dNode:
for sTag in dToken["tags"]:
if sTag in dNode["<tags>"]:
if bDebug:
echo(" MATCH: /" + sTag)
yield { "iNode1": iNode1, "dNode": dGraph[dNode["<tags>"][sTag]] }
bTokenFound = True
# meta arc (for token type)
if "<meta>" in dNode:
|
|
|
|
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
|
continue
if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph):
if bDebug:
echo(" MATCH: @" + sRegex)
yield { "iNode1": iNode1, "dNode": dGraph[dNode["<re_morph>"][sRegex]] }
bTokenFound = True
# token tags
if "aTags" in dToken and "<tags>" in dNode:
for sTag in dToken["aTags"]:
if sTag in dNode["<tags>"]:
if bDebug:
echo(" MATCH: /" + sTag)
yield { "iNode1": iNode1, "dNode": dGraph[dNode["<tags>"][sTag]] }
bTokenFound = True
# meta arc (for token type)
if "<meta>" in dNode:
|
︙ | | | ︙ | |
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
|
echo(" COND_OK")
pass
elif cActionType == "/":
# Tag
nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0]
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
for i in range(nTokenStart, nTokenEnd+1):
if "tags" in self.lToken[i]:
self.lToken[i]["tags"].update(sWhat.split("|"))
else:
self.lToken[i]["tags"] = set(sWhat.split("|"))
if bDebug:
echo(" TAG: {} > [{}:{}]".format(sWhat, self.lToken[nTokenStart]["sValue"], self.lToken[nTokenEnd]["sValue"]))
if sWhat not in self.dTags:
self.dTags[sWhat] = [nTokenStart, nTokenStart]
else:
self.dTags[sWhat][0] = min(nTokenStart, self.dTags[sWhat][0])
self.dTags[sWhat][1] = max(nTokenEnd, self.dTags[sWhat][1])
|
|
|
|
|
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
|
echo(" COND_OK")
pass
elif cActionType == "/":
# Tag
nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0]
nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1]
for i in range(nTokenStart, nTokenEnd+1):
if "aTags" in self.lToken[i]:
self.lToken[i]["aTags"].update(sWhat.split("|"))
else:
self.lToken[i]["aTags"] = set(sWhat.split("|"))
if bDebug:
echo(" TAG: {} > [{}:{}]".format(sWhat, self.lToken[nTokenStart]["sValue"], self.lToken[nTokenEnd]["sValue"]))
if sWhat not in self.dTags:
self.dTags[sWhat] = [nTokenStart, nTokenStart]
else:
self.dTags[sWhat][0] = min(nTokenStart, self.dTags[sWhat][0])
self.dTags[sWhat][1] = max(nTokenEnd, self.dTags[sWhat][1])
|
︙ | | | ︙ | |
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
|
return False
if dToken["i"] < dTags[sTag][1]:
return True
return False
def g_tag (dToken, sTag):
    """Return True if token <dToken> carries tag <sTag>, else False."""
    # dict.get with an empty default folds the "has the key at all?" check
    # and the membership test into a single lookup
    return sTag in dToken.get("tags", ())
def g_space_between_tokens (dToken1, dToken2, nMin, nMax=None):
nSpace = dToken2["nStart"] - dToken1["nEnd"]
if nSpace < nMin:
return False
if nMax is not None and nSpace > nMax:
|
|
|
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
|
return False
if dToken["i"] < dTags[sTag][1]:
return True
return False
def g_tag (dToken, sTag):
    """Return True if token <dToken> carries tag <sTag>, else False."""
    # dict.get with an empty default folds the "has the key at all?" check
    # and the membership test into a single lookup
    return sTag in dToken.get("aTags", ())
def g_space_between_tokens (dToken1, dToken2, nMin, nMax=None):
nSpace = dToken2["nStart"] - dToken1["nEnd"]
if nSpace < nMin:
return False
if nMax is not None and nSpace > nMax:
|
︙ | | | ︙ | |