Overview
| Comment: | [core] token offset for correct token positioning |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | core | rg |
| Files: | files | file ages | folders |
| SHA3-256: | 38cd64c0b91a460f8ceec8c788a33aab |
| User & Date: | olr on 2018-06-02 14:01:06 |
| Other Links: | branch diff | manifest | tags |
Context
|
2018-06-02
| ||
| 15:21 | [build][core][bug] generate rules with multiple tokens check-in: 667266b248 user: olr tags: core, build, rg | |
| 14:01 | [core] token offset for correct token positioning check-in: 38cd64c0b9 user: olr tags: core, rg | |
| 13:47 | [graphspell] tokenizer: add option for <start> and <end> tokens check-in: 3339da6424 user: olr tags: graphspell, rg | |
Changes
Modified gc_core/py/lang_core/gc_engine.py from [c9f32df1df] to [362b18f218].
| ︙ | |||
272 273 274 275 276 277 278 | 272 273 274 275 276 277 278 279 280 281 282 283 284 285 | - |
else:
xErr.aProperties = ()
return xErr
def _createTokenDictError (lToken, sSentence, sSentence0, sRepl, iFirstToken, nStart, nEnd, sLineId, sRuleId, bUppercase, sMsg, sURL, bIdRule, sOption, bContext):
"error as a dictionary"
|
| ︙ | |||
670 671 672 673 674 675 676 | 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 | - + |
class TokenSentence:
def __init__ (self, sSentence, sSentence0, iStart):
self.sSentence = sSentence
self.sSentence0 = sSentence0
self.iStart = iStart
|
| ︙ | |||
727 728 729 730 731 732 733 | 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 | - + - + + + - - + + - + |
bFirst = False
bValid = True
if not bValid:
del lPointer[i]
lPointer.extend(lNewPointer)
# check arcs of first nodes
for dNode in self._getNextMatchingNodes(dToken, dGraph[0]):
|
| ︙ |