Changes In Branch mtok Through [cb404dd67a] Excluding Merge-Ins
This is equivalent to a diff from f831e2b562 to cb404dd67a
2021-03-13
  11:34  [fr] adjustments (check-in: b62d7a4807, user: olr, tags: fr, mtok)
  09:53  [core] gc engine: fix morphx() (check-in: cb404dd67a, user: olr, tags: core, mtok)
  08:07  [core][fr] fix getLemmaOfMorph() (check-in: 392ae92308, user: olr, tags: fr, core, mtok)

2021-03-10
  20:22  [fr] false positive (check-in: 0915b69239, user: olr, tags: trunk, fr)

2021-03-09
  17:20  [misc] SublimeText syntax coloring (check-in: 126b183b9d, user: olr, tags: misc, mtok)
  07:23  [fr] adjustments (check-in: f831e2b562, user: olr, tags: trunk, fr)

2021-03-06
  23:10  [fr] adjustments (check-in: 199d498ccf, user: olr, tags: trunk, fr)
Modified compile_rules_graph.py from [82cd1181fb] to [523cbe02bb].
︙
34 35 36 37 38 39 40 | def rewriteCode (sCode): "convert simple code syntax to a string of Python code" if sCode[0:1] == "=": sCode = sCode[1:] sCode = sCode.replace("__also__", "bCondMemo") sCode = sCode.replace("__else__", "not bCondMemo") sCode = sCode.replace("sContext", "_sAppContext") | | | | 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 | def rewriteCode (sCode): "convert simple code syntax to a string of Python code" if sCode[0:1] == "=": sCode = sCode[1:] sCode = sCode.replace("__also__", "bCondMemo") sCode = sCode.replace("__else__", "not bCondMemo") sCode = sCode.replace("sContext", "_sAppContext") sCode = re.sub(r"\b(morph[0x]?|morphVC|value|tag|meta|info)[(]\\(\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode) sCode = re.sub(r"\b(morph[0x]?|morphVC|value|tag|meta|info)[(]\\-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode) sCode = re.sub(r"\b(select|define|definefrom|rewrite|addmorph|setmeta)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode) sCode = re.sub(r"\b(select|define|definefrom|rewrite|addmorph|setmeta)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode) sCode = re.sub(r"\b(agreement|suggAgree)[(][\\](\d+), *[\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2], lToken[nTokenOffset+\\3]', sCode) sCode = re.sub(r"\b(agreement|suggAgree)[(][\\](\d+), *[\\]-(\d+)", 'g_\\1(lToken[nTokenOffset+\\2], lToken[nLastToken-\\3+1]', sCode) sCode = re.sub(r"\b(agreement|suggAgree)[(][\\]-(\d+), *[\\](\d+)", 'g_\\1(lToken[nLastToken-\\2+1], lToken[nTokenOffset+\\3]', sCode) sCode = re.sub(r"\b(agreement|suggAgree)[(][\\]-(\d+), *[\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1], lToken[nLastToken-\\3+1]', sCode) sCode = re.sub(r"\b(tagbefore|tagafter)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2], dTags', sCode) |
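The substitutions above map rule-DSL token references onto the token list used by the generated functions: \2 becomes lToken[nTokenOffset+2] (second token of the match) and \-2 becomes lToken[nLastToken-2+1] (second token counted from the end), with the morph, value, tag, agreement and suggAgree helpers all rewritten to their g_* counterparts. Below is a minimal standalone sketch of that mapping, limited to the two morph substitutions and fed an invented piece of rule code:

    import re

    def rewrite_demo(sCode):
        # Simplified excerpt of rewriteCode(): only the two morph-related substitutions.
        sCode = re.sub(r"\b(morph[0x]?|morphVC|value|tag|meta|info)[(]\\(\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode)
        sCode = re.sub(r"\b(morph[0x]?|morphVC|value|tag|meta|info)[(]\\-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
        return sCode

    print(rewrite_demo(r'morphx(\2, ":V") and morph(\-1, ":N")'))
    # -> g_morphx(lToken[nTokenOffset+2], ":V") and g_morph(lToken[nLastToken-1+1], ":N")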
︙
283 284 285 286 287 288 289 | if m: sOption = m.group(1) sAction = sAction[m.end():].strip() if nPriority == -1: nPriority = self.dOptPriority.get(sOption, 4) # valid action? | | | 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 | if m: sOption = m.group(1) sAction = sAction[m.end():].strip() if nPriority == -1: nPriority = self.dOptPriority.get(sOption, 4) # valid action? m = re.search(r"(?P<action>[-=~/!>&])(?P<start>-?\d+\.?|)(?P<end>:\.?-?\d+|)(?P<casing>:|)>>", sAction) if not m: print("\n# Error. No action found at: ", sLineId, sActionId) exit() # Condition sCondition = sAction[:m.start()].strip() if sCondition: |
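The action-header pattern accepts “&” among the action-type characters, alongside “-”, “=”, “~”, “/”, “!” and “>”. A quick check of what its named groups capture, on an invented action header:

    import re

    zActionHeader = re.compile(r"(?P<action>[-=~/!>&])(?P<start>-?\d+\.?|)(?P<end>:\.?-?\d+|)(?P<casing>:|)>>")
    m = zActionHeader.search("&1:2>> :HM")   # invented example: a multi-token action over tokens 1 to 2
    print(m.group("action"), m.group("start"), m.group("end"))   # prints: & 1 :2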
︙
371 372 373 374 375 376 377 | checkTokenNumbers(sCondition, sActionId, nToken) # check tokens in condition checkTokenNumbers(sAction, sActionId, nToken) # check tokens in action if cAction == ">": ## no action, break loop if condition is False return [sLineId, sOption, sCondition, cAction, ""] | | | 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 | checkTokenNumbers(sCondition, sActionId, nToken) # check tokens in condition checkTokenNumbers(sAction, sActionId, nToken) # check tokens in action if cAction == ">": ## no action, break loop if condition is False return [sLineId, sOption, sCondition, cAction, ""] if not sAction and cAction not in "!#": print(f"\n# Error in action at line <{sLineId}/{sActionId}>: This action is empty.") exit() if sAction[0:1] != "=" and cAction != "=": checkIfThereIsCode(sAction, sActionId) if cAction == "-": |
︙
402 403 404 405 406 407 408 | nToken = sAction.count("|") + 1 if iStartAction > 0 and iEndAction > 0: if (iEndAction - iStartAction + 1) != nToken: print(f"\n# Error in action at line <{sLineId}/{sActionId}>: numbers of modified tokens modified.") elif iStartAction < 0 or iEndAction < 0 and iStartAction != iEndAction: print(f"\n# Warning in action at line <{sLineId}/{sActionId}>: rewriting with possible token position modified.") return [sLineId, sOption, sCondition, cAction, sAction, iStartAction, iEndAction, bCaseSensitivity] | | | 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 | nToken = sAction.count("|") + 1 if iStartAction > 0 and iEndAction > 0: if (iEndAction - iStartAction + 1) != nToken: print(f"\n# Error in action at line <{sLineId}/{sActionId}>: numbers of modified tokens modified.") elif iStartAction < 0 or iEndAction < 0 and iStartAction != iEndAction: print(f"\n# Warning in action at line <{sLineId}/{sActionId}>: rewriting with possible token position modified.") return [sLineId, sOption, sCondition, cAction, sAction, iStartAction, iEndAction, bCaseSensitivity] if cAction in "!/&": ## tags return [sLineId, sOption, sCondition, cAction, sAction, iStartAction, iEndAction] if cAction == "=": ## disambiguator sAction = self.createFunction("da", sAction) return [sLineId, sOption, sCondition, cAction, sAction] print("\n# Unknown action at ", sLineId, sActionId) |
︙
541 542 543 544 545 546 547 | elif sLine.startswith(" ||"): # tokens line continuation iPrevLine, sPrevLine = lTokenLine[-1] lTokenLine[-1] = [iPrevLine, sPrevLine + " " + sLine.strip()[2:]] elif sLine.startswith(" <<- "): # actions lActions.append([iLine, sLine[12:].strip()]) | | | | 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 | elif sLine.startswith(" ||"): # tokens line continuation iPrevLine, sPrevLine = lTokenLine[-1] lTokenLine[-1] = [iPrevLine, sPrevLine + " " + sLine.strip()[2:]] elif sLine.startswith(" <<- "): # actions lActions.append([iLine, sLine[12:].strip()]) if not re.search(r"[-=~/!>&](?:-?\d\.?(?::\.?-?\d+|)|):?>>", sLine): bActionBlock = True elif sLine.startswith(" && "): # action message iPrevLine, sPrevLine = lActions[-1] lActions[-1] = [iPrevLine, sPrevLine + sLine] elif sLine.startswith(" ") and bActionBlock: # action line continuation iPrevLine, sPrevLine = lActions[-1] lActions[-1] = [iPrevLine, sPrevLine + " " + sLine.strip()] if re.search(r"[-=~/!>&](?:-?\d\.?(?::\.?-?\d+|)|):?>>", sLine): bActionBlock = False elif re.match("[ ]*$", sLine): # empty line to end merging if not lTokenLine: continue if bActionBlock or not lActions: print("# Error. No action found at line:", iLine) |
︙
Modified darg.py from [f98928fa4d] to [6cee0c2543].
︙
212 213 214 215 216 217 218 | # Used as a key in a python dictionary. # Nodes are equivalent if they have identical arcs, and each identical arc leads to identical states. return self.__str__() == other.__str__() def getNodeAsDict (self): "returns the node as a dictionary structure" dNode = {} | | | | > > > > > | 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 | # Used as a key in a python dictionary. # Nodes are equivalent if they have identical arcs, and each identical arc leads to identical states. return self.__str__() == other.__str__() def getNodeAsDict (self): "returns the node as a dictionary structure" dNode = {} dReValue = {} # regex for token values dReMorph = {} # regex for morph dMorph = {} # simple search in morph dReMultiMorph = {} # regex for morph in multi-tokens dLemma = {} dPhonet = {} dMeta = {} dTag = {} dRule = {} for sArc, oNode in self.dArcs.items(): if sArc.startswith("@") and len(sArc) > 1: dReMorph[sArc[1:]] = oNode.__hash__() elif sArc.startswith("$") and len(sArc) > 1: dMorph[sArc[1:]] = oNode.__hash__() elif sArc.startswith("&") and len(sArc) > 1: dReMultiMorph[sArc[1:]] = oNode.__hash__() elif sArc.startswith("~") and len(sArc) > 1: dReValue[sArc[1:]] = oNode.__hash__() elif sArc.startswith(">") and len(sArc) > 1: dLemma[sArc[1:]] = oNode.__hash__() elif sArc.startswith("%") and len(sArc) > 1: dPhonet[sArc[1:]] = oNode.__hash__() elif sArc.startswith("*") and len(sArc) > 1: dMeta[sArc[1:]] = oNode.__hash__() elif sArc.startswith("/") and len(sArc) > 1: dTag[sArc[1:]] = oNode.__hash__() elif sArc.startswith("##"): dRule[sArc[1:]] = oNode.__hash__() else: dNode[sArc] = oNode.__hash__() if dReValue: dNode["<re_value>"] = dReValue if dReMorph: dNode["<re_morph>"] = dReMorph if dReMultiMorph: dNode["<re_mmorph>"] = dReMultiMorph if dMorph: dNode["<morph>"] = dMorph if dLemma: dNode["<lemmas>"] = dLemma if dPhonet: dNode["<phonet>"] = dPhonet if dTag: |
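Each arc prefix routes the arc into a dedicated sub-dictionary of the serialized node; the “&” prefix gathers regexes to be tested against multi-token morphologies, stored under the <re_mmorph> key. A toy version of the same dispatch, with invented arcs and fake node hashes:

    # Toy version of the dispatch above; arc labels and hash values are invented.
    dArcs = {"le": 101, "@:V": 102, "$:N": 103, "&:HM": 104, ">être": 105}
    dNode, dReMorph, dMorph, dReMultiMorph, dLemma = {}, {}, {}, {}, {}
    for sArc, nNode in dArcs.items():
        if sArc.startswith("@") and len(sArc) > 1:
            dReMorph[sArc[1:]] = nNode           # regex on morphologies
        elif sArc.startswith("$") and len(sArc) > 1:
            dMorph[sArc[1:]] = nNode             # plain search in morphologies
        elif sArc.startswith("&") and len(sArc) > 1:
            dReMultiMorph[sArc[1:]] = nNode      # regex on multi-token morphologies
        elif sArc.startswith(">") and len(sArc) > 1:
            dLemma[sArc[1:]] = nNode             # lemma arc
        else:
            dNode[sArc] = nNode                  # plain token-value arc
    for sKey, d in (("<re_morph>", dReMorph), ("<morph>", dMorph), ("<re_mmorph>", dReMultiMorph), ("<lemmas>", dLemma)):
        if d:
            dNode[sKey] = d
    print(dNode)
    # {'le': 101, '<re_morph>': {':V': 102}, '<morph>': {':N': 103}, '<re_mmorph>': {':HM': 104}, '<lemmas>': {'être': 105}}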
︙
Modified gc_core/js/lang_core/gc_engine.js from [04cf68e8aa] to [95c9d7785e].
︙
297 298 299 300 301 302 303 | } } else if (!sOption || option(sOption)) { for (let [zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions, lGroups, lNegLookBefore] of lRuleGroup) { if (!gc_engine.aIgnoredRules.has(sRuleId)) { while ((m = zRegex.gl_exec2(sText, lGroups, lNegLookBefore)) !== null) { let bCondMemo = null; | | | | | | 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 | } } else if (!sOption || option(sOption)) { for (let [zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions, lGroups, lNegLookBefore] of lRuleGroup) { if (!gc_engine.aIgnoredRules.has(sRuleId)) { while ((m = zRegex.gl_exec2(sText, lGroups, lNegLookBefore)) !== null) { let bCondMemo = null; for (let [sFuncCond, cActionType, sAction, ...eAct] of lActions) { // action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ] try { bCondMemo = (!sFuncCond || gc_functions[sFuncCond](sText, sText0, m, this.dTokenPos, sCountry, bCondMemo)); if (bCondMemo) { switch (cActionType) { case "-": // grammar error //console.log("-> error detected in " + sLineId + "\nzRegex: " + zRegex.source); let nErrorStart = nOffset + m.start[eAct[0]]; if (!this.dError.has(nErrorStart) || nPriority > this.dErrorPriority.get(nErrorStart)) { this.dError.set(nErrorStart, this._createErrorFromRegex(sText, sText0, sAction, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bShowRuleId, sOption, bContext)); this.dErrorPriority.set(nErrorStart, nPriority); this.dSentenceError.set(nErrorStart, this.dError.get(nErrorStart)); } break; case "~": // text processor //console.log("-> text processor by " + sLineId + "\nzRegex: " + zRegex.source); sText = this.rewriteText(sText, sAction, eAct[0], m, bUppercase); bChange = true; if (bDebug) { console.log("~ " + sText + " -- " + m[eAct[0]] + " # " + sLineId); } break; case "=": // disambiguation //console.log("-> disambiguation by " + sLineId + "\nzRegex: " + zRegex.source); gc_functions[sAction](sText, m, this.dTokenPos); if (bDebug) { console.log("= " + m[0] + " # " + sLineId, "\nDA:", this.dTokenPos); } break; case ">": // we do nothing, this test is just a condition to apply all following actions break; |
︙
387 388 389 390 391 392 393 | } if (bDebug) { console.log("UPDATE:"); console.log(this.asString()); } } | | | 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 | } if (bDebug) { console.log("UPDATE:"); console.log(this.asString()); } } * _getNextNodes (oGraph, oToken, oNode, bKeep=false) { // generator: return matches where <oToken> “values” match <oNode> arcs try { let bTokenFound = false; // token value if (oNode.hasOwnProperty(oToken["sValue"])) { yield [" ", oToken["sValue"], oNode[oToken["sValue"]]]; bTokenFound = true; |
︙
575 576 577 578 579 580 581 | } if (!bTokenFound && bKeep) { yield [null, "", -1]; } // JUMP // Warning! Recurssion! if (oNode.hasOwnProperty("<>")) { | | | | | > > > > > > > > | | | | > | | | | | > | | > > > > > > > > | 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 | } if (!bTokenFound && bKeep) { yield [null, "", -1]; } // JUMP // Warning! Recurssion! if (oNode.hasOwnProperty("<>")) { yield* this._getNextNodes(oGraph, oToken, oGraph[oNode["<>"]], bKeep=true); } } catch (e) { console.error(e); } } parseGraph (oGraph, sCountry="${country_default}", dOptions=null, bShowRuleId=false, bDebug=false, bContext=false) { // parse graph with tokens from the text and execute actions encountered let lPointers = []; let bTagAndRewrite = false; try { for (let [iToken, oToken] of this.lTokens.entries()) { if (bDebug) { console.log("TOKEN: " + oToken["sValue"]); } // check arcs for each existing pointer let lNextPointers = []; for (let oPointer of lPointers) { if (oPointer["nMultiEnd"] != -1) { if (oToken["i"] <= oPointer["nMultiEnd"]) { lNextPointers.push(oPointer); } if (oToken["i"] != oPointer["nMultiEnd"]) { continue; } } for (let [cNodeType, sMatch, iNode] of this._getNextNodes(oGraph, oToken, oGraph[oPointer["iNode"]])) { if (cNodeType === null) { lNextPointers.push(oPointer); continue; } if (bDebug) { console.log(" MATCH: " + cNodeType + sMatch); } let nMultiEnd = (cNodeType != "&") ? -1 : dToken["nMultiStartTo"]; lNextPointers.push({ "iToken1": oPointer["iToken1"], "iNode": iNode, "nMultiEnd": nMultiEnd }); } } lPointers = lNextPointers; // check arcs of first nodes for (let [cNodeType, sMatch, iNode] of this._getNextNodes(oGraph, oToken, oGraph[0])) { if (cNodeType === null) { continue; } if (bDebug) { console.log(" MATCH: " + cNodeType + sMatch); } let nMultiEnd = (cNodeType != "&") ? -1 : dToken["nMultiStartTo"]; lPointers.push({ "iToken1": iToken, "iNode": iNode, "nMultiEnd": nMultiEnd }); } // check if there is rules to check for each pointer for (let oPointer of lPointers) { if (oPointer["nMultiEnd"] != -1) { if (oToken["i"] < oPointer["nMultiEnd"]) { continue; } if (oToken["i"] == oPointer["nMultiEnd"]) { oPointer["nMultiEnd"] = -1; } } if (oGraph[oPointer["iNode"]].hasOwnProperty("<rules>")) { let bChange = this._executeActions(oGraph, oGraph[oPointer["iNode"]]["<rules>"], oPointer["iToken1"]-1, iToken, dOptions, sCountry, bShowRuleId, bDebug, bContext); if (bChange) { bTagAndRewrite = true; } } } |
︙
649 650 651 652 653 654 655 | for (let [sLineId, nextNodeKey] of Object.entries(oNode)) { let bCondMemo = null; for (let sRuleId of oGraph[nextNodeKey]) { try { if (bDebug) { console.log(" >TRY: " + sRuleId + " " + sLineId); } | | > | | | | | | | | | | > > > > > > > > > > > > > > > > | | 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 | for (let [sLineId, nextNodeKey] of Object.entries(oNode)) { let bCondMemo = null; for (let sRuleId of oGraph[nextNodeKey]) { try { if (bDebug) { console.log(" >TRY: " + sRuleId + " " + sLineId); } let [_, sOption, sFuncCond, cActionType, sAction, ...eAct] = gc_rules_graph.dRule[sRuleId]; // Suggestion [ option, condition, "-", replacement/suggestion/action, iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, iURL ] // TextProcessor [ option, condition, "~", replacement/suggestion/action, iTokenStart, iTokenEnd, bCaseSvty ] // Disambiguator [ option, condition, "=", replacement/suggestion/action ] // Tag [ option, condition, "/", replacement/suggestion/action, iTokenStart, iTokenEnd ] // Immunity [ option, condition, "!", "", iTokenStart, iTokenEnd ] // Immunity [ option, condition, "&", "", iTokenStart, iTokenEnd ] // Test [ option, condition, ">", "" ] if (!sOption || dOptions.gl_get(sOption, false)) { bCondMemo = !sFuncCond || gc_functions[sFuncCond](this.lTokens, nTokenOffset, nLastToken, sCountry, bCondMemo, this.dTags, this.sSentence, this.sSentence0); if (bCondMemo) { if (cActionType == "-") { // grammar error let [iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, iURL] = eAct; let nTokenErrorStart = (iTokenStart > 0) ? nTokenOffset + iTokenStart : nLastToken + iTokenStart; if (!this.lTokens[nTokenErrorStart].hasOwnProperty("sImmunity") || (this.lTokens[nTokenErrorStart]["sImmunity"] != "*" && !this.lTokens[nTokenErrorStart]["sImmunity"].includes(sOption))) { let nTokenErrorEnd = (iTokenEnd > 0) ? nTokenOffset + iTokenEnd : nLastToken + iTokenEnd; let nErrorStart = this.nOffsetWithinParagraph + ((cStartLimit == "<") ? this.lTokens[nTokenErrorStart]["nStart"] : this.lTokens[nTokenErrorStart]["nEnd"]); let nErrorEnd = this.nOffsetWithinParagraph + ((cEndLimit == ">") ? this.lTokens[nTokenErrorEnd]["nEnd"] : this.lTokens[nTokenErrorEnd]["nStart"]); if (!this.dError.has(nErrorStart) || nPriority > this.dErrorPriority.gl_get(nErrorStart, -1)) { this.dError.set(nErrorStart, this._createErrorFromTokens(sAction, nTokenOffset, nLastToken, nTokenErrorStart, nErrorStart, nErrorEnd, sLineId, sRuleId, bCaseSvty, sMessage, gc_rules_graph.dURL[iURL], bShowRuleId, sOption, bContext)); this.dErrorPriority.set(nErrorStart, nPriority); this.dSentenceError.set(nErrorStart, this.dError.get(nErrorStart)); if (bDebug) { console.log(" NEW_ERROR: ", this.dError.get(nErrorStart)); } } } } else if (cActionType == "~") { // text processor let nTokenStart = (eAct[0] > 0) ? nTokenOffset + eAct[0] : nLastToken + eAct[0]; let nTokenEnd = (eAct[1] > 0) ? 
nTokenOffset + eAct[1] : nLastToken + eAct[1]; this._tagAndPrepareTokenForRewriting(sAction, nTokenStart, nTokenEnd, nTokenOffset, nLastToken, eAct[2], bDebug); bChange = true; if (bDebug) { console.log(` TEXT_PROCESSOR: [${this.lTokens[nTokenStart]["sValue"]}:${this.lTokens[nTokenEnd]["sValue"]}] > ${sAction}`); } } else if (cActionType == "=") { // disambiguation gc_functions[sAction](this.lTokens, nTokenOffset, nLastToken); if (bDebug) { console.log(` DISAMBIGUATOR: (${sAction}) [${this.lTokens[nTokenOffset+1]["sValue"]}:${this.lTokens[nLastToken]["sValue"]}]`); } } else if (cActionType == ">") { // we do nothing, this test is just a condition to apply all following actions if (bDebug) { console.log(" COND_OK"); } } else if (cActionType == "/") { // Tag let nTokenStart = (eAct[0] > 0) ? nTokenOffset + eAct[0] : nLastToken + eAct[0]; let nTokenEnd = (eAct[1] > 0) ? nTokenOffset + eAct[1] : nLastToken + eAct[1]; for (let i = nTokenStart; i <= nTokenEnd; i++) { if (this.lTokens[i].hasOwnProperty("aTags")) { this.lTokens[i]["aTags"].add(...sAction.split("|")) } else { this.lTokens[i]["aTags"] = new Set(sAction.split("|")); } } if (bDebug) { console.log(` TAG: ${sAction} > [${this.lTokens[nTokenStart]["sValue"]}:${this.lTokens[nTokenEnd]["sValue"]}]`); } for (let sTag of sAction.split("|")) { if (!this.dTags.has(sTag)) { this.dTags.set(sTag, [nTokenStart, nTokenEnd]); } else { this.dTags.set(sTag, [Math.min(nTokenStart, this.dTags.get(sTag)[0]), Math.max(nTokenEnd, this.dTags.get(sTag)[1])]); } } } else if (cActionType == "!") { // immunity if (bDebug) { console.log(" IMMUNITY: " + sLineId + " / " + sRuleId); } let nTokenStart = (eAct[0] > 0) ? nTokenOffset + eAct[0] : nLastToken + eAct[0]; let nTokenEnd = (eAct[1] > 0) ? nTokenOffset + eAct[1] : nLastToken + eAct[1]; let sImmunity = sAction || "*"; if (nTokenEnd - nTokenStart == 0) { this.lTokens[nTokenStart]["sImmunity"] = sImmunity; let nErrorStart = this.nOffsetWithinParagraph + this.lTokens[nTokenStart]["nStart"]; if (this.dError.has(nErrorStart)) { this.dError.delete(nErrorStart); } } else { for (let i = nTokenStart; i <= nTokenEnd; i++) { this.lTokens[i]["sImmunity"] = sImmunity; let nErrorStart = this.nOffsetWithinParagraph + this.lTokens[i]["nStart"]; if (this.dError.has(nErrorStart)) { this.dError.delete(nErrorStart); } } } } else if (cActionType == "#") { // multi-tokens let nTokenStart = (eAct[0] > 0) ? nTokenOffset + eAct[0] : nLastToken + eAct[0]; let nTokenEnd = (eAct[1] > 0) ? nTokenOffset + eAct[1] : nLastToken + eAct[1]; let oMultiToken = { "nTokenStart": nTokenStart, "nTokenEnd": nTokenEnd, "lTokens": this.lTokens.slice(nTokenStart, nTokenEnd+1), "lMorph": (sAction) ? sAction.split("|") : [":HM"] } this.lTokens[nTokenStart]["nMultiStartTo"] = nTokenEnd; this.lTokens[nTokenEnd]["nMultiEndFrom"] = nTokenStart; this.lTokens[nTokenStart]["oMultiToken"] = oMultiToken; this.lTokens[nTokenEnd]["oMultiToken"] = oMultiToken; } else { console.log("# error: unknown action at " + sLineId); } } else if (cActionType == ">") { if (bDebug) { console.log(" COND_BREAK"); } |
︙
874 875 876 877 878 879 880 | sNew = sRepl.gl_expand(m); sNew = sNew + " ".repeat(ln-sNew.length); } //console.log(sText+"\nstart: "+m.start[iGroup]+" end:"+m.end[iGroup]); return sText.slice(0, m.start[iGroup]) + sNew + sText.slice(m.end[iGroup]); } | | | | | | | | | | | | | | | 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 | sNew = sRepl.gl_expand(m); sNew = sNew + " ".repeat(ln-sNew.length); } //console.log(sText+"\nstart: "+m.start[iGroup]+" end:"+m.end[iGroup]); return sText.slice(0, m.start[iGroup]) + sNew + sText.slice(m.end[iGroup]); } _tagAndPrepareTokenForRewriting (sAction, nTokenRewriteStart, nTokenRewriteEnd, nTokenOffset, nLastToken, bCaseSvty, bDebug) { // text processor: rewrite tokens between <nTokenRewriteStart> and <nTokenRewriteEnd> position if (sAction === "*") { // purge text if (nTokenRewriteEnd - nTokenRewriteStart == 0) { this.lTokens[nTokenRewriteStart]["bToRemove"] = true; } else { for (let i = nTokenRewriteStart; i <= nTokenRewriteEnd; i++) { this.lTokens[i]["bToRemove"] = true; } } } else if (sAction === "␣") { // merge tokens this.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd; } else if (sAction.startsWith("␣")) { sAction = this._expand(sAction, nTokenOffset, nLastToken); this.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd; this.lTokens[nTokenRewriteStart]["sMergedValue"] = sAction.slice(1); } else if (sAction === "_") { // neutralized token if (nTokenRewriteEnd - nTokenRewriteStart == 0) { this.lTokens[nTokenRewriteStart]["sNewValue"] = "_"; } else { for (let i = nTokenRewriteStart; i <= nTokenRewriteEnd; i++) { this.lTokens[i]["sNewValue"] = "_"; } } } else { if (sAction.startsWith("=")) { sAction = gc_functions[sAction.slice(1)](this.lTokens, nTokenOffset, nLastToken); } else { sAction = this._expand(sAction, nTokenOffset, nLastToken); } let bUppercase = bCaseSvty && this.lTokens[nTokenRewriteStart]["sValue"].slice(0,1).gl_isUpperCase(); if (nTokenRewriteEnd - nTokenRewriteStart == 0) { // one token if (bUppercase) { sAction = sAction.gl_toCapitalize(); } this.lTokens[nTokenRewriteStart]["sNewValue"] = sAction; } else { // several tokens let lTokenValue = sAction.split("|"); if (lTokenValue.length != (nTokenRewriteEnd - nTokenRewriteStart + 1)) { if (bDebug) { console.log("Error. Text processor: number of replacements != number of tokens."); } return; } let j = 0; |
︙
Modified gc_core/js/lang_core/gc_functions.js from [d31e5f26d5] to [ed16810cd6].
︙
198 199 200 201 202 203 204 | if (sValues.includes(sValue)) { return true; } } return false; } | | < < < | 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 | if (sValues.includes(sValue)) { return true; } } return false; } function g_morph (oToken, sPattern, sNegPattern="", nLeft=null, nRight=null) { // analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies let lMorph; if (oToken.hasOwnProperty("lMorph")) { lMorph = oToken["lMorph"]; } else { if (nLeft !== null) { let sValue = (nRight !== null) ? oToken["sValue"].slice(nLeft, nRight) : oToken["sValue"].slice(nLeft); lMorph = gc_engine.oSpellChecker.getMorph(sValue); } else { lMorph = gc_engine.oSpellChecker.getMorph(oToken["sValue"]); } } if (lMorph.length == 0) { return false; } |
︙
234 235 236 237 238 239 240 | } } } // search sPattern return lMorph.some(sMorph => (sMorph.search(sPattern) !== -1)); } | > > > > > > > > > > > > > > > > > > > > > > > > > | < < < | 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 | } } } // search sPattern return lMorph.some(sMorph => (sMorph.search(sPattern) !== -1)); } function g_morphx (oToken, sPattern, sNegPattern="") { // analyse a multi-token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies if (!oToken.hasOwnProperty("oMultiToken")) { return false; } let lMorph = oToken["oMultiToken"]["lMorph"]; if (lMorph.length == 0) { return false; } // check negative condition if (sNegPattern) { if (sNegPattern == "*") { // all morph must match sPattern return lMorph.every(sMorph => (sMorph.search(sPattern) !== -1)); } else { if (lMorph.some(sMorph => (sMorph.search(sNegPattern) !== -1))) { return false; } } } // search sPattern return lMorph.some(sMorph => (sMorph.search(sPattern) !== -1)); } function g_morph0 (oToken, sPattern, sNegPattern="", nLeft=null, nRight=null) { // analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies let lMorph; if (nLeft !== null) { let sValue = (nRight !== null) ? oToken["sValue"].slice(nLeft, nRight) : oToken["sValue"].slice(nLeft); lMorph = gc_engine.oSpellChecker.getMorph(sValue); } else { lMorph = gc_engine.oSpellChecker.getMorph(oToken["sValue"]); } if (lMorph.length == 0) { return false; } // check negative condition |
︙
Modified gc_core/py/lang_core/gc_engine.py from [2b36e73536] to [caa8d936e0].
︙
338 339 340 341 342 343 344 | sText = self.parseGraph(_rules_graph.dAllGraph[sGraphName], sCountry, dOptions, bShowRuleId, bDebug, bContext) elif not sOption or dOptions.get(sOption, False): # regex rules for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup: if sRuleId not in _aIgnoredRules: for m in zRegex.finditer(sText): bCondMemo = None | | | | | | 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 | sText = self.parseGraph(_rules_graph.dAllGraph[sGraphName], sCountry, dOptions, bShowRuleId, bDebug, bContext) elif not sOption or dOptions.get(sOption, False): # regex rules for zRegex, bUppercase, sLineId, sRuleId, nPriority, lActions in lRuleGroup: if sRuleId not in _aIgnoredRules: for m in zRegex.finditer(sText): bCondMemo = None for sFuncCond, cActionType, sAction, *eAct in lActions: # action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ] try: bCondMemo = not sFuncCond or getattr(gc_functions, sFuncCond)(sText, sText0, m, self.dTokenPos, sCountry, bCondMemo) if bCondMemo: if bDebug: echo("RULE: " + sLineId) if cActionType == "-": # grammar error nErrorStart = nOffset + m.start(eAct[0]) if nErrorStart not in self.dError or nPriority > self.dErrorPriority.get(nErrorStart, -1): self.dError[nErrorStart] = self._createErrorFromRegex(sText, sText0, sAction, nOffset, m, eAct[0], sLineId, sRuleId, bUppercase, eAct[1], eAct[2], bShowRuleId, sOption, bContext) self.dErrorPriority[nErrorStart] = nPriority self.dSentenceError[nErrorStart] = self.dError[nErrorStart] elif cActionType == "~": # text processor sText = self.rewriteText(sText, sAction, eAct[0], m, bUppercase) bChange = True if bDebug: echo("~ " + sText + " -- " + m.group(eAct[0]) + " # " + sLineId) elif cActionType == "=": # disambiguation if not bParagraph: getattr(gc_functions, sAction)(sText, m, self.dTokenPos) if bDebug: echo("= " + m.group(0) + " # " + sLineId) elif cActionType == ">": # we do nothing, this test is just a condition to apply all following actions pass else: echo("# error: unknown action at " + sLineId) |
︙
394 395 396 397 398 399 400 | dToken["aTags"] = self.dTokenPos[dToken["nStart"]]["aTags"] self.lTokens = lNewTokens self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lTokens if dToken["sType"] != "INFO" } if bDebug: echo("UPDATE:") echo(self) | | | 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 | dToken["aTags"] = self.dTokenPos[dToken["nStart"]]["aTags"] self.lTokens = lNewTokens self.dTokenPos = { dToken["nStart"]: dToken for dToken in self.lTokens if dToken["sType"] != "INFO" } if bDebug: echo("UPDATE:") echo(self) def _getNextNodes (self, dGraph, dToken, dNode, bKeep=False): "generator: return matches where <dToken> “values” match <dNode> arcs" bTokenFound = False # token value if dToken["sValue"] in dNode: yield (" ", dToken["sValue"], dNode[dToken["sValue"]]) bTokenFound = True if dToken["sValue"][0:2].istitle(): # we test only 2 first chars, to match words such as "Laissez-les", "Crève-cœur". |
︙
505 506 507 508 509 510 511 512 513 514 515 516 517 518 | bTokenFound = True else: if sNegPattern and any(re.search(sNegPattern, sMorph) for sMorph in lMorph): continue if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph): yield ("@", sRegex, dNode["<re_morph>"][sRegex]) bTokenFound = True # token tags if "aTags" in dToken and "<tags>" in dNode: for sTag in dToken["aTags"]: if sTag in dNode["<tags>"]: yield ("/", sTag, dNode["<tags>"][sTag]) bTokenFound = True # meta arc (for token type) | > > > > > > > > > > > > > > > > > > > > > > > > > | 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 | bTokenFound = True else: if sNegPattern and any(re.search(sNegPattern, sMorph) for sMorph in lMorph): continue if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph): yield ("@", sRegex, dNode["<re_morph>"][sRegex]) bTokenFound = True # regex multi morph arcs if "<re_mmorph>" in dNode: if "nMultiStartTo" in dToken: lMorph = dToken["dMultiToken"]["lMorph"] for sRegex in dNode["<re_mmorph>"]: if "¬" not in sRegex: # no anti-pattern if any(re.search(sRegex, sMorph) for sMorph in lMorph): yield ("&", sRegex, dNode["<re_mmorph>"][sRegex]) bTokenFound = True else: # there is an anti-pattern sPattern, sNegPattern = sRegex.split("¬", 1) if sNegPattern == "*": # all morphologies must match with <sPattern> if sPattern: if all(re.search(sPattern, sMorph) for sMorph in lMorph): yield ("&", sRegex, dNode["<re_mmorph>"][sRegex]) bTokenFound = True else: if sNegPattern and any(re.search(sNegPattern, sMorph) for sMorph in lMorph): continue if not sPattern or any(re.search(sPattern, sMorph) for sMorph in lMorph): yield ("&", sRegex, dNode["<re_mmorph>"][sRegex]) bTokenFound = True # token tags if "aTags" in dToken and "<tags>" in dNode: for sTag in dToken["aTags"]: if sTag in dNode["<tags>"]: yield ("/", sTag, dNode["<tags>"][sTag]) bTokenFound = True # meta arc (for token type) |
︙
527 528 529 530 531 532 533 | yield ("*", sMeta, dNode["<meta>"][sMeta]) bTokenFound = True if not bTokenFound and bKeep: yield (None, "", -1) # JUMP # Warning! Recursion! if "<>" in dNode: | | | | | > > > > > | | | | > | | | | | > | | | > > > | | > | | | | | | | | | | > > > > > > > > > > > > > > > > > | 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 | yield ("*", sMeta, dNode["<meta>"][sMeta]) bTokenFound = True if not bTokenFound and bKeep: yield (None, "", -1) # JUMP # Warning! Recursion! if "<>" in dNode: yield from self._getNextNodes(dGraph, dToken, dGraph[dNode["<>"]], bKeep=True) def parseGraph (self, dGraph, sCountry="${country_default}", dOptions=None, bShowRuleId=False, bDebug=False, bContext=False): "parse graph with tokens from the text and execute actions encountered" lPointers = [] bTagAndRewrite = False for iToken, dToken in enumerate(self.lTokens): if bDebug: echo("TOKEN: " + dToken["sValue"]) # check arcs for each existing pointer lNextPointers = [] for dPointer in lPointers: if dPointer["nMultiEnd"] != -1: if dToken["i"] <= dPointer["nMultiEnd"]: lNextPointers.append(dPointer) if dToken["i"] != dPointer["nMultiEnd"]: continue for cNodeType, sMatch, iNode in self._getNextNodes(dGraph, dToken, dGraph[dPointer["iNode"]]): if cNodeType is None: lNextPointers.append(dPointer) continue if bDebug: echo(" MATCH: " + cNodeType + sMatch) nMultiEnd = -1 if cNodeType != "&" else dToken["nMultiStartTo"] lNextPointers.append({ "iToken1": dPointer["iToken1"], "iNode": iNode, "nMultiEnd": nMultiEnd }) lPointers = lNextPointers # check arcs of first nodes for cNodeType, sMatch, iNode in self._getNextNodes(dGraph, dToken, dGraph[0]): if cNodeType is None: continue if bDebug: echo(" MATCH: " + cNodeType + sMatch) nMultiEnd = -1 if cNodeType != "&" else dToken["nMultiStartTo"] lPointers.append({ "iToken1": iToken, "iNode": iNode, "nMultiEnd": nMultiEnd }) # check if there is rules to check for each pointer for dPointer in lPointers: if dPointer["nMultiEnd"] != -1: if dToken["i"] < dPointer["nMultiEnd"]: continue if dToken["i"] == dPointer["nMultiEnd"]: dPointer["nMultiEnd"] = -1 if "<rules>" in dGraph[dPointer["iNode"]]: bChange = self._executeActions(dGraph, dGraph[dPointer["iNode"]]["<rules>"], dPointer["iToken1"]-1, iToken, dOptions, sCountry, bShowRuleId, bDebug, bContext) if bChange: bTagAndRewrite = True if bTagAndRewrite: self.rewriteFromTags(bDebug) if bDebug: echo(self) return self.sSentence def _executeActions (self, dGraph, dNode, nTokenOffset, nLastToken, dOptions, sCountry, bShowRuleId, bDebug, bContext): "execute actions found in the DARG" bChange = False for sLineId, nextNodeKey in dNode.items(): bCondMemo = None for sRuleId in dGraph[nextNodeKey]: try: if bDebug: echo(" >TRY: " + sRuleId + " " + sLineId) _, sOption, sFuncCond, cActionType, sAction, *eAct = _rules_graph.dRule[sRuleId] # Suggestion [ option, condition, "-", replacement/suggestion/action, 
iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, iURL ] # TextProcessor [ option, condition, "~", replacement/suggestion/action, iTokenStart, iTokenEnd, bCaseSvty ] # Disambiguator [ option, condition, "=", replacement/suggestion/action ] # Tag [ option, condition, "/", replacement/suggestion/action, iTokenStart, iTokenEnd ] # Immunity [ option, condition, "!", option, iTokenStart, iTokenEnd ] # Multi-token [ option, condition, "&", morphologies, iTokenStart, iTokenEnd ] # Test [ option, condition, ">", "" ] if not sOption or dOptions.get(sOption, False): bCondMemo = not sFuncCond or getattr(gc_functions, sFuncCond)(self.lTokens, nTokenOffset, nLastToken, sCountry, bCondMemo, self.dTags, self.sSentence, self.sSentence0) if bCondMemo: if cActionType == "-": # grammar error iTokenStart, iTokenEnd, cStartLimit, cEndLimit, bCaseSvty, nPriority, sMessage, iURL = eAct nTokenErrorStart = nTokenOffset + iTokenStart if iTokenStart > 0 else nLastToken + iTokenStart if "sImmunity" not in self.lTokens[nTokenErrorStart] or (self.lTokens[nTokenErrorStart]["sImmunity"] != "*" and sOption not in self.lTokens[nTokenErrorStart]["sImmunity"]): nTokenErrorEnd = nTokenOffset + iTokenEnd if iTokenEnd > 0 else nLastToken + iTokenEnd nErrorStart = self.nOffsetWithinParagraph + (self.lTokens[nTokenErrorStart]["nStart"] if cStartLimit == "<" else self.lTokens[nTokenErrorStart]["nEnd"]) nErrorEnd = self.nOffsetWithinParagraph + (self.lTokens[nTokenErrorEnd]["nEnd"] if cEndLimit == ">" else self.lTokens[nTokenErrorEnd]["nStart"]) if nErrorStart not in self.dError or nPriority > self.dErrorPriority.get(nErrorStart, -1): self.dError[nErrorStart] = self._createErrorFromTokens(sAction, nTokenOffset, nLastToken, nTokenErrorStart, nErrorStart, nErrorEnd, sLineId, sRuleId, bCaseSvty, \ sMessage, _rules_graph.dURL.get(iURL, ""), bShowRuleId, sOption, bContext) self.dErrorPriority[nErrorStart] = nPriority self.dSentenceError[nErrorStart] = self.dError[nErrorStart] if bDebug: echo(" NEW_ERROR: {}".format(self.dError[nErrorStart])) elif cActionType == "~": # text processor nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0] nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1] self._tagAndPrepareTokenForRewriting(sAction, nTokenStart, nTokenEnd, nTokenOffset, nLastToken, eAct[2], bDebug) bChange = True if bDebug: echo(" TEXT_PROCESSOR: [{}:{}] > {}".format(self.lTokens[nTokenStart]["sValue"], self.lTokens[nTokenEnd]["sValue"], sAction)) elif cActionType == "=": # disambiguation getattr(gc_functions, sAction)(self.lTokens, nTokenOffset, nLastToken) if bDebug: echo(" DISAMBIGUATOR: ({}) [{}:{}]".format(sAction, self.lTokens[nTokenOffset+1]["sValue"], self.lTokens[nLastToken]["sValue"])) elif cActionType == ">": # we do nothing, this test is just a condition to apply all following actions if bDebug: echo(" COND_OK") elif cActionType == "/": # Tag nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0] nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1] for i in range(nTokenStart, nTokenEnd+1): if "aTags" in self.lTokens[i]: self.lTokens[i]["aTags"].update(sAction.split("|")) else: self.lTokens[i]["aTags"] = set(sAction.split("|")) if bDebug: echo(" TAG: {} > [{}:{}]".format(sAction, self.lTokens[nTokenStart]["sValue"], self.lTokens[nTokenEnd]["sValue"])) for sTag in sAction.split("|"): if sTag not in self.dTags: self.dTags[sTag] = [nTokenStart, nTokenEnd] else: self.dTags[sTag][0] = min(nTokenStart, 
self.dTags[sTag][0]) self.dTags[sTag][1] = max(nTokenEnd, self.dTags[sTag][1]) elif cActionType == "!": # immunity if bDebug: echo(" IMMUNITY: " + sLineId + " / " + sRuleId) nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0] nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1] sImmunity = sAction or "*" if nTokenEnd - nTokenStart == 0: self.lTokens[nTokenStart]["sImmunity"] = sImmunity nErrorStart = self.nOffsetWithinParagraph + self.lTokens[nTokenStart]["nStart"] if nErrorStart in self.dError: del self.dError[nErrorStart] else: for i in range(nTokenStart, nTokenEnd+1): self.lTokens[i]["sImmunity"] = sImmunity nErrorStart = self.nOffsetWithinParagraph + self.lTokens[i]["nStart"] if nErrorStart in self.dError: del self.dError[nErrorStart] elif cActionType == "&": # multi-tokens nTokenStart = nTokenOffset + eAct[0] if eAct[0] > 0 else nLastToken + eAct[0] nTokenEnd = nTokenOffset + eAct[1] if eAct[1] > 0 else nLastToken + eAct[1] dMultiToken = { "nTokenStart": nTokenStart, "nTokenEnd": nTokenEnd, "lTokens": self.lTokens[nTokenStart:nTokenEnd+1], "lMorph": sAction.split("|") if sAction else [":HM"] } self.lTokens[nTokenStart]["nMultiStartTo"] = nTokenEnd self.lTokens[nTokenEnd]["nMultiEndFrom"] = nTokenStart self.lTokens[nTokenStart]["dMultiToken"] = dMultiToken self.lTokens[nTokenEnd]["dMultiToken"] = dMultiToken if bDebug: echo(" MULTI-TOKEN: ({}) [{}:{}]".format(sAction, self.lTokens[nTokenOffset+1]["sValue"], self.lTokens[nLastToken]["sValue"])) #print(dMultiToken) else: echo("# error: unknown action at " + sLineId) elif cActionType == ">": if bDebug: echo(" COND_BREAK") break except Exception as e: |
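The “&” action type creates a multi-token: the engine stores the span and its declared morphologies in a dMultiToken dictionary and marks the first and last tokens of the span (nMultiStartTo / nMultiEndFrom) so that graph pointers can later treat the whole span as a single unit. A minimal sketch of the resulting structure, built here from invented tokens:

    # Invented tokens; only the fields touched by the multi-token action are shown.
    lTokens = [
        {"i": 1, "sValue": "quatre", "nStart": 0, "nEnd": 6},
        {"i": 2, "sValue": "heures", "nStart": 7, "nEnd": 13},
    ]
    nTokenStart, nTokenEnd = 0, 1
    dMultiToken = {
        "nTokenStart": nTokenStart,
        "nTokenEnd": nTokenEnd,
        "lTokens": lTokens[nTokenStart:nTokenEnd+1],
        "lMorph": [":HM"],                                # default morphology when the action gives none
    }
    lTokens[nTokenStart]["nMultiStartTo"] = nTokenEnd     # first token records where the span ends
    lTokens[nTokenEnd]["nMultiEndFrom"] = nTokenStart     # last token records where the span starts
    lTokens[nTokenStart]["dMultiToken"] = dMultiToken
    lTokens[nTokenEnd]["dMultiToken"] = dMultiToken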
︙
771 772 773 774 775 776 777 | if bUppercase and m.group(iGroup)[0:1].isupper(): sNew = sNew.capitalize() else: sNew = m.expand(sRepl) sNew = sNew + " " * (nLen-len(sNew)) return sText[0:m.start(iGroup)] + sNew + sText[m.end(iGroup):] | | | | | | | | | | | | | | | 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 | if bUppercase and m.group(iGroup)[0:1].isupper(): sNew = sNew.capitalize() else: sNew = m.expand(sRepl) sNew = sNew + " " * (nLen-len(sNew)) return sText[0:m.start(iGroup)] + sNew + sText[m.end(iGroup):] def _tagAndPrepareTokenForRewriting (self, sAction, nTokenRewriteStart, nTokenRewriteEnd, nTokenOffset, nLastToken, bCaseSvty, bDebug): "text processor: rewrite tokens between <nTokenRewriteStart> and <nTokenRewriteEnd> position" if sAction == "*": # purge text if nTokenRewriteEnd - nTokenRewriteStart == 0: self.lTokens[nTokenRewriteStart]["bToRemove"] = True else: for i in range(nTokenRewriteStart, nTokenRewriteEnd+1): self.lTokens[i]["bToRemove"] = True elif sAction == "␣": # merge tokens self.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd elif sAction.startswith("␣"): sAction = self._expand(sAction, nTokenOffset, nLastToken) self.lTokens[nTokenRewriteStart]["nMergeUntil"] = nTokenRewriteEnd self.lTokens[nTokenRewriteStart]["sMergedValue"] = sAction[1:] elif sAction == "_": # neutralized token if nTokenRewriteEnd - nTokenRewriteStart == 0: self.lTokens[nTokenRewriteStart]["sNewValue"] = "_" else: for i in range(nTokenRewriteStart, nTokenRewriteEnd+1): self.lTokens[i]["sNewValue"] = "_" else: if sAction.startswith("="): sAction = getattr(gc_functions, sAction[1:])(self.lTokens, nTokenOffset, nLastToken) else: sAction = self._expand(sAction, nTokenOffset, nLastToken) bUppercase = bCaseSvty and self.lTokens[nTokenRewriteStart]["sValue"][0:1].isupper() if nTokenRewriteEnd - nTokenRewriteStart == 0: # one token if bUppercase: sAction = sAction[0:1].upper() + sAction[1:] self.lTokens[nTokenRewriteStart]["sNewValue"] = sAction else: # several tokens lTokenValue = sAction.split("|") if len(lTokenValue) != (nTokenRewriteEnd - nTokenRewriteStart + 1): if bDebug: echo("Error. Text processor: number of replacements != number of tokens.") return for i, sValue in zip(range(nTokenRewriteStart, nTokenRewriteEnd+1), lTokenValue): if not sValue or sValue == "*": self.lTokens[i]["bToRemove"] = True |
︙
Modified gc_core/py/lang_core/gc_functions.py from [069bca6c44] to [8b75051fd9].
︙
184 185 186 187 188 189 190 | return True sValue = sValue.lower() if sValue in sValues: return True return False | | < < > > > > > > > > > > > > > > > > > > > > > | < < | 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 | return True sValue = sValue.lower() if sValue in sValues: return True return False def g_morph (dToken, sPattern, sNegPattern="", nLeft=None, nRight=None): "analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies" if "lMorph" in dToken: lMorph = dToken["lMorph"] else: if nLeft is not None: lMorph = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)]) else: lMorph = _oSpellChecker.getMorph(dToken["sValue"]) if not lMorph: return False # check negative condition if sNegPattern: if sNegPattern == "*": # all morph must match sPattern zPattern = re.compile(sPattern) return all(zPattern.search(sMorph) for sMorph in lMorph) zNegPattern = re.compile(sNegPattern) if any(zNegPattern.search(sMorph) for sMorph in lMorph): return False # search sPattern zPattern = re.compile(sPattern) return any(zPattern.search(sMorph) for sMorph in lMorph) def g_morphx (dToken, sPattern, sNegPattern=""): "analyse a multi-token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies" if not "dMultiToken" in dToken: return False lMorph = dToken["dMultiToken"]["lMorph"] if not lMorph: return False # check negative condition if sNegPattern: if sNegPattern == "*": # all morph must match sPattern zPattern = re.compile(sPattern) return all(zPattern.search(sMorph) for sMorph in lMorph) zNegPattern = re.compile(sNegPattern) if any(zNegPattern.search(sMorph) for sMorph in lMorph): return False # search sPattern zPattern = re.compile(sPattern) return any(zPattern.search(sMorph) for sMorph in lMorph) def g_morph0 (dToken, sPattern, sNegPattern="", nLeft=None, nRight=None): "analyse a token, return True if <sNegPattern> not in morphologies and <sPattern> in morphologies (disambiguation off)" if nLeft is not None: lMorph = _oSpellChecker.getMorph(dToken["sValue"][slice(nLeft, nRight)]) else: lMorph = _oSpellChecker.getMorph(dToken["sValue"]) if not lMorph: return False # check negative condition if sNegPattern: if sNegPattern == "*": |
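g_morphx() checks the morphologies carried by a multi-token (the dMultiToken built by a “&” action) rather than querying the spellchecker. A self-contained usage sketch with a trimmed copy of the function and a hand-built token; the morphology string is invented:

    import re

    def g_morphx_demo(dToken, sPattern, sNegPattern=""):
        # Trimmed copy of g_morphx() above, kept standalone for the example.
        if "dMultiToken" not in dToken:
            return False
        lMorph = dToken["dMultiToken"]["lMorph"]
        if not lMorph:
            return False
        if sNegPattern:
            if sNegPattern == "*":
                return all(re.search(sPattern, sMorph) for sMorph in lMorph)
            if any(re.search(sNegPattern, sMorph) for sMorph in lMorph):
                return False
        return any(re.search(sPattern, sMorph) for sMorph in lMorph)

    dToken = {"sValue": "quatre", "dMultiToken": {"lMorph": [">quatre-vingt-dix/:B:e:p"]}}
    print(g_morphx_demo(dToken, ":B"))         # True: the pattern is found
    print(g_morphx_demo(dToken, ":V", ":B"))   # False: the negative pattern matches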
︙
Modified gc_lang/fr/modules-js/cregex.js from [c1ba0b4cea] to [2376d50b6d].
︙
78 79 80 81 82 83 84 | _zNPf: new RegExp(":(?:M[12P]|T):f"), _zNPe: new RegExp(":(?:M[12P]|T):e"), ///// FONCTIONS getLemmaOfMorph: function (sMorph) { | | > > > > | 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 | _zNPf: new RegExp(":(?:M[12P]|T):f"), _zNPe: new RegExp(":(?:M[12P]|T):e"), ///// FONCTIONS getLemmaOfMorph: function (sMorph) { let m = this._zLemma.exec(sMorph); if (m) { return m[1]; } return ""; }, agreement: function (l1, l2) { // returns True if agreement in gender and number is possible between morphologies <l1> and <l2> let [sGender1, sNumber1] = this.getGenderNumber(l1); let [sGender2, sNumber2] = this.getGenderNumber(l2); if (sNumber1 !== ":i" && sNumber2 !== ":i" && sNumber1 !== sNumber2) { |
︙
Modified gc_lang/fr/modules-js/gce_analyseur.js from [7da47c7f30] to [233e133202].
︙
9 10 11 12 13 14 15 | if (oToken["sValue"].includes("-t-")) { nEnd = nEnd - 2; } else if (oToken["sValue"].search(/-l(?:es?|a)-(?:[mt]oi|nous|leur)$|(?:[nv]ous|lui|leur)-en$/) != -1) { nEnd = oToken["sValue"].slice(0,nEnd).lastIndexOf("-"); } } | | | 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 | if (oToken["sValue"].includes("-t-")) { nEnd = nEnd - 2; } else if (oToken["sValue"].search(/-l(?:es?|a)-(?:[mt]oi|nous|leur)$|(?:[nv]ous|lui|leur)-en$/) != -1) { nEnd = oToken["sValue"].slice(0,nEnd).lastIndexOf("-"); } } return g_morph(oToken, sPattern, sNegPattern, 0, nEnd); } function apposition (sWord1, sWord2) { // returns true if nom + nom (no agreement required) return sWord2.length < 2 || (cregex.mbNomNotAdj(gc_engine.oSpellChecker.getMorph(sWord2)) && cregex.mbPpasNomNotAdj(gc_engine.oSpellChecker.getMorph(sWord1))); } |
︙
Modified gc_lang/fr/modules/cregex.py from [d34aee52e1] to [412242b1dc].
︙
78 79 80 81 82 83 84 | NPe = re.compile(":(?:M[12P]|T):e") #### FONCTIONS def getLemmaOfMorph (s): "return lemma in morphology <s>" | | > > > | 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 | NPe = re.compile(":(?:M[12P]|T):e") #### FONCTIONS def getLemmaOfMorph (s): "return lemma in morphology <s>" m = Lemma.search(s) if m: return m.group(1) return "" def agreement (l1, l2): "returns True if agreement in gender and number is possible between morphologies <l1> and <l2>" sGender1, sNumber1 = getGenderNumber(l1) sGender2, sNumber2 = getGenderNumber(l2) if sNumber1 != ":i" and sNumber2 != ":i" and sNumber1 != sNumber2: return False |
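The rewritten getLemmaOfMorph() returns an empty string when the lemma pattern does not match, rather than assuming a match is always found. The same defensive lookup in isolation; the Lemma regex and the morphology strings below are assumptions about the usual “>lemma/:tags” format, not the module’s actual constants:

    import re

    Lemma = re.compile(r"^>(\w[\w-]*)")      # assumed shape of the lemma pattern

    def get_lemma_of_morph(s):
        "return lemma in morphology <s>, or an empty string if absent"
        m = Lemma.search(s)
        if m:
            return m.group(1)
        return ""

    print(get_lemma_of_morph(">avoir/:V"))   # avoir
    print(get_lemma_of_morph(":G"))          # empty string instead of an exception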
︙
Modified gc_lang/fr/modules/gce_analyseur.py from [af51223cf8] to [edfe6ed977].
1 2 3 4 5 6 7 8 9 10 11 12 13 | #### GRAMMAR CHECKING ENGINE PLUGIN: Parsing functions for French language from . import cregex as cr def g_morphVC (dToken, sPattern, sNegPattern=""): "lance la fonction g_morph() sur la première partie d’un verbe composé (ex: vient-il)" nEnd = dToken["sValue"].rfind("-") if dToken["sValue"].count("-") > 1: if "-t-" in dToken["sValue"]: nEnd = nEnd - 2 elif re.search("-l(?:es?|a)-(?:[mt]oi|nous|leur)$|(?:[nv]ous|lui|leur)-en$", dToken["sValue"]): nEnd = dToken["sValue"][0:nEnd].rfind("-") | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 | #### GRAMMAR CHECKING ENGINE PLUGIN: Parsing functions for French language from . import cregex as cr def g_morphVC (dToken, sPattern, sNegPattern=""): "lance la fonction g_morph() sur la première partie d’un verbe composé (ex: vient-il)" nEnd = dToken["sValue"].rfind("-") if dToken["sValue"].count("-") > 1: if "-t-" in dToken["sValue"]: nEnd = nEnd - 2 elif re.search("-l(?:es?|a)-(?:[mt]oi|nous|leur)$|(?:[nv]ous|lui|leur)-en$", dToken["sValue"]): nEnd = dToken["sValue"][0:nEnd].rfind("-") return g_morph(dToken, sPattern, sNegPattern, 0, nEnd) def apposition (sWord1, sWord2): "returns True if nom + nom (no agreement required)" return len(sWord2) < 2 or (cr.mbNomNotAdj(_oSpellChecker.getMorph(sWord2)) and cr.mbPpasNomNotAdj(_oSpellChecker.getMorph(sWord1))) |
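g_morphVC() restricts the analysis to the verb part of a composed form such as “vient-il” or “donne-t-il”: it computes the cut position nEnd, then calls g_morph() on the token with nLeft=0 and nRight=nEnd. A small sketch of just that slicing step, on invented examples:

    import re

    def cut_composed_verb(sValue):
        # Reproduces the nEnd computation of g_morphVC() above and returns the verb part only.
        nEnd = sValue.rfind("-")
        if sValue.count("-") > 1:
            if "-t-" in sValue:
                nEnd = nEnd - 2
            elif re.search("-l(?:es?|a)-(?:[mt]oi|nous|leur)$|(?:[nv]ous|lui|leur)-en$", sValue):
                nEnd = sValue[0:nEnd].rfind("-")
        return sValue[0:nEnd]

    print(cut_composed_verb("vient-il"))        # vient
    print(cut_composed_verb("donne-t-il"))      # donne
    print(cut_composed_verb("donnez-la-moi"))   # donnez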
︙
Modified gc_lang/fr/rules.grx from [16699b2371] to [4d63d167b4].
︙
428 429 430 431 432 433 434 435 436 437 438 439 440 441 | [)]\b(?![s¹²³⁴⁵⁶⁷⁸⁹⁰]\b) <<- not before("\\((?:[rR][eéEÉ]|[qQ][uU]’|[nNmMtTsSdDlL]’)$") ->> ") " && Il manque un espace après la parenthèse. __<s>/typo(typo_parenthèse_ouvrante_collée)__ \b[(](?=[^)][^)][^)]) <<- ->> " (" && Il manque un espace avant la parenthèse. TEST: C’est au fond du couloir{{(}}celui du deuxième étage{{)}}qu’il se trouve. ->> " (|||) " TEST: (a + b)² TEST: il faut (re)former tout ça. TEST: il (n’)est (qu’)ingénieur # Points et espaces __<s>/typo(typo_point_entre_deux_espaces)__ [ ][.](?=[ ]) <<- ->> . && Pas d’espace avant un point. | > | 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 | [)]\b(?![s¹²³⁴⁵⁶⁷⁸⁹⁰]\b) <<- not before("\\((?:[rR][eéEÉ]|[qQ][uU]’|[nNmMtTsSdDlL]’)$") ->> ") " && Il manque un espace après la parenthèse. __<s>/typo(typo_parenthèse_ouvrante_collée)__ \b[(](?=[^)][^)][^)]) <<- ->> " (" && Il manque un espace avant la parenthèse. TEST: C’est au fond du couloir{{(}}celui du deuxième étage{{)}}qu’il se trouve. ->> " (|||) " TEST: de gain différentiel 𝐴 (𝑉ᵣ = 𝐴·𝑣H{{)}}et associé ->> ") " TEST: (a + b)² TEST: il faut (re)former tout ça. TEST: il (n’)est (qu’)ingénieur # Points et espaces __<s>/typo(typo_point_entre_deux_espaces)__ [ ][.](?=[ ]) <<- ->> . && Pas d’espace avant un point. |
︙
1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 | TEST: « Je suis donc perdu ? », dit Paul. TEST: “C’est bon !”, croit savoir Marie. TEST: “Parce que… ?” finit par demander Paul. TEST: « Dans quel pays sommes-nous ? » demanda un manifestant. !! !! !! !! !! | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 | TEST: « Je suis donc perdu ? », dit Paul. TEST: “C’est bon !”, croit savoir Marie. TEST: “Parce que… ?” finit par demander Paul. TEST: « Dans quel pays sommes-nous ? » demanda un manifestant. !!!! Purge des références aux notes !! # les références aux notes __<s>(p_exposants)__ [¹²³⁴⁵⁶⁷⁸⁹⁰]+ <<- ~>> * __[i](p_références_aux_notes)__ ({w_2})(\d+) @@0,$ <<- not morph(\0, ":") and morph(\1, ":") ~2>> * TEST: POLITIQUESOCIÉTÉÉCONOMIEMONDECULTUREART DE VIVREMAGAZINE (qui peut faire boguer JavaScript avec certaines regex) !!!! Normalisation du “t” euphonique !! __<i]/tu(tu_t_euphonique_incorrect)__ ([-–—− ]t(?:[’' ][-–—−]?|[-–—−][’' ]?))(ie?ls?|elles?|on|tu) @@0,$ <<- re.search("(?i)^(?:ie?ls|elles|tu)$", \2) -1>> - && Le “t” euphonique n’est pas nécessaire avec “\2”.|http://bdl.oqlf.gouv.qc.ca/bdl/gabarit_bdl.asp?T1=t+euphonique&id=2513 <<- __else__ and \1 != "-t-" and \1 != "-T-" -1>> -t- && Pour le “t” euphonique, il faut deux traits d’union. Pas d’apostrophe. Pas d’espace. <<- \1 != "-t-" ~1>> -t- __<i]/tu(tu_t_euphonique_superflu)__ [td]([- ]t[-’' ])(?:ie?l|elle|on) @@1 <<- -1>> - && Le “t” euphonique est superflu quand le verbe se termine par “t” ou “d”.|http://bdl.oqlf.gouv.qc.ca/bdl/gabarit_bdl.asp?T1=t+euphonique&id=2513 <<- \1 != "-t-" ~1>> -t- __<i]/eleu(eleu_t_euphonique_manquant)__ [aec](-(ie?l|elle|on)) @@1,2 <<- -1>> -t-\2 && Euphonie. Il faut un “t” euphonique.|http://bdl.oqlf.gouv.qc.ca/bdl/gabarit_bdl.asp?T1=t+euphonique&id=2513 TEST: va{{ t’}}il y parvenir ? ->> -t- TEST: A{{ t’}}elle soif ? ->> -t- TEST: A{{ t-}}elle faim ? ->> -t- TEST: a{{ t'}}elle ->> -t- TEST: a{{-t'}}il ->> -t- TEST: a{{-t }}il. ->> -t- TEST: a{{ t’}}il. ->> -t- TEST: a{{ t-}}on. ->> -t- TEST: donne{{ t-}}il ->> -t- TEST: donne{{-t }}il ->> -t- TEST: vient{{-t-}}il ->> - TEST: viendras{{-t-}}tu ->> - TEST: Viendront{{ t-}}ils ->> - TEST: viennent{{ t-}}ils ->> - TEST: mangent{{-t-}}elles ->> - TEST: Ont{{ t’}}ils ->> - TEST: Ont{{-t’}}ils ->> - TEST: l’ont{{ t’}}ils vu ? ->> - TEST: exploite{{−t−}}il les ressources numériques ->> -t- TEST: vainc{{-il}} ses ennemis aisément ->> -t-il TEST: Assis, gronde{{-t -}}elle ->> -t- TEST: vient-il demain ? TEST: prend-elle l’avantage ? TEST: saura-t-on jamais la vérité ? TEST: arrive-t-elle ce matin ? TEST: y aura-t-il du poulet au dîner ? !! !! !! !! !! |
︙
1635 1636 1637 1638 1639 1640 1641 | __[s]/num(num_lettre_O_zéro1)__ [\dO]+[O][\dO]+ <<- not option("ocr") ->> =\0.replace("O", "0") && S’il s’agit d’un nombre, utilisez le chiffre « 0 » plutôt que la lettre « O ». __[s]/num(num_lettre_O_zéro2)__ [1-9]O <<- not option("ocr") ->> =\0.replace("O", "0") && S’il s’agit d’un nombre, utilisez le chiffre « 0 » plutôt que la lettre « O ». TEST: année {{2O11}} ->> 2011 TEST: {{3O}} (chiffre avec un O). ->> 30 | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < | 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 | __[s]/num(num_lettre_O_zéro1)__ [\dO]+[O][\dO]+ <<- not option("ocr") ->> =\0.replace("O", "0") && S’il s’agit d’un nombre, utilisez le chiffre « 0 » plutôt que la lettre « O ». __[s]/num(num_lettre_O_zéro2)__ [1-9]O <<- not option("ocr") ->> =\0.replace("O", "0") && S’il s’agit d’un nombre, utilisez le chiffre « 0 » plutôt que la lettre « O ». TEST: année {{2O11}} ->> 2011 TEST: {{3O}} (chiffre avec un O). ->> 30 !!!! Traits d’union !! __[i]/tu(tu_trait_union_douteux)__ ({w1})(?:--|—|–|−|⁃)({w1}) @@0,$ <<- spell(\1+"-"+\2) and analyse(\1+"-"+\2, ":") ->> \1-\2 && Trait d’union : un tiret simple suffit. TEST: Nous préparons une {{contre–attaque}}. ->> contre-attaque TEST: Nous préparons une {{contre−attaque}}. ->> contre-attaque @@@@ @@@@ @@@@ @@@@ @@@@GRAPH: graphe0|g0 _ |
︙
4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 | # <<- \1 == \2 # and not value(\2, "|nous|vous|faire|en|la|lui|donnant|œuvre|ah|oh|eh|hé|ho|ha|hou|olé|joli|Bora|couvent|dément|sapiens|très|vroum|") # and not (value(\1, "|est|une|") and value(<1, "|l’|d’|")) # and not (\2 == "mieux" and value(<1, "|qui|")) # ->> \1 && Doublon. # #TEST: Il y a un {{doublon doublon}}. ->> doublon !! !! !!!! Élisions & euphonie !! !! !! | > | 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 | # <<- \1 == \2 # and not value(\2, "|nous|vous|faire|en|la|lui|donnant|œuvre|ah|oh|eh|hé|ho|ha|hou|olé|joli|Bora|couvent|dément|sapiens|très|vroum|") # and not (value(\1, "|est|une|") and value(<1, "|l’|d’|")) # and not (\2 == "mieux" and value(<1, "|qui|")) # ->> \1 && Doublon. # #TEST: Il y a un {{doublon doublon}}. ->> doublon !! !! !!!! Élisions & euphonie !! !! !! |
︙
23205 23206 23207 23208 23209 23210 23211 | <<- /conf/ -3>> nues && Confusion. Écrivez “nues” (ancien terme pour “nuages”).|https://fr.wiktionary.org/wiki/tomber_des_nues TEST: Elle en est tombée des {{nus}}. ->> nues # numérique / digital __conf_numérique_digital__ | | > > | 23208 23209 23210 23211 23212 23213 23214 23215 23216 23217 23218 23219 23220 23221 23222 23223 23224 | <<- /conf/ -3>> nues && Confusion. Écrivez “nues” (ancien terme pour “nuages”).|https://fr.wiktionary.org/wiki/tomber_des_nues TEST: Elle en est tombée des {{nus}}. ->> nues # numérique / digital __conf_numérique_digital__ [>agence|>appareil|>banque|>caméra|>colonie|>colonisation|>communication|>compagnie|>connexion] >digital [>document|>économie|>entreprise|>ère|>expérience|>fichier|>identité|>industrie|>présence|>prise] >digital [>service|>solution|>stratégie|>télévision|>transformation|>transition|>révolution] >digital <<- /conf/ -2>> numérique|numériques && Confusion : “digital” est un adjectif se rapportant aux doigts (empreinte digitale, arthrose digitale, etc.). Écrivez “numérique”. [le|du|au] digital <<- /conf/ -2>> numérique && Confusion : “digital” est un adjectif se rapportant aux doigts (empreinte digitale, arthrose digitale, etc.). Écrivez “numérique”. |
︙
Modified misc/grammalecte.sublime-color-scheme from [c24fa9f267] to [a8384ba127].
︙
64 65 66 67 68 69 70 | { "name": "Entity Valid", "scope": "entity.valid", "foreground": "hsl(150, 100%, 80%)", "background": "hsl(150, 100%, 20%)", "font_style": "bold", }, { "name": "Entity Invalid", "scope": "entity.invalid", "foreground": "hsl(0, 100%, 80%)", "background": "hsl(0, 100%, 20%)", "font_style": "bold", }, { "name": "Token meta", "scope": "string.meta", "foreground": "hsl(270, 100%, 90%)", "background": "hsl(270, 100%, 40%)", }, { "name": "Token token", "scope": "string.token", "foreground": "hsl(240, 50%, 90%)", "background": "hsl(240, 50%, 40%)", }, { "name": "Token Jumptoken", "scope": "string.jumptoken", "foreground": "hsl(0, 50%, 90%)", "background": "hsl(10, 50%, 40%)", }, { "name": "Token lemma", "scope": "string.lemma", "foreground": "hsl(210, 100%, 80%)", "background": "hsl(210, 100%, 15%)", }, | | | > | 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 | { "name": "Entity Valid", "scope": "entity.valid", "foreground": "hsl(150, 100%, 80%)", "background": "hsl(150, 100%, 20%)", "font_style": "bold", }, { "name": "Entity Invalid", "scope": "entity.invalid", "foreground": "hsl(0, 100%, 80%)", "background": "hsl(0, 100%, 20%)", "font_style": "bold", }, { "name": "Token meta", "scope": "string.meta", "foreground": "hsl(270, 100%, 90%)", "background": "hsl(270, 100%, 40%)", }, { "name": "Token token", "scope": "string.token", "foreground": "hsl(240, 50%, 90%)", "background": "hsl(240, 50%, 40%)", }, { "name": "Token Jumptoken", "scope": "string.jumptoken", "foreground": "hsl(0, 50%, 90%)", "background": "hsl(10, 50%, 40%)", }, { "name": "Token lemma", "scope": "string.lemma", "foreground": "hsl(210, 100%, 80%)", "background": "hsl(210, 100%, 15%)", }, { "name": "Token phonet", "scope": "string.phonet", "foreground": "hsl(90, 100%, 80%)", "background": "hsl(90, 100%, 10%)", }, { "name": "Token tag", "scope": "string.tag", "foreground": "hsl(30, 100%, 90%)", "background": "hsl(30, 100%, 20%)", }, { "name": "Token regex", "scope": "string.regex", "foreground": "hsl(60, 100%, 80%)", "background": "hsl(60, 100%, 10%)", }, { "name": "Token morph regex", "scope": "string.morph.regex", "foreground": "hsl(150, 80%, 90%)", "background": "hsl(150, 80%, 10%)", }, { "name": "Token morph negregex", "scope": "string.morph.negregex", "foreground": "hsl(0, 80%, 90%)", "background": "hsl(0, 80%, 10%)", }, { "name": "MulToken morph regex", "scope": "string.mt.morph.regex", "foreground": "hsl(180, 80%, 90%)", "background": "hsl(180, 80%, 10%)", }, { "name": "Keyword Python", "scope": "keyword.python", "foreground": "#A0A0A0", }, { "name": "Keyword", "scope": "keyword - (source.c keyword.operator | source.c++ keyword.operator | source.objc keyword.operator | source.objc++ keyword.operator), keyword.operator.word", "foreground": "#F06070", }, { "name": "String", "scope": "string", "foreground": "hsl(40, 100%, 80%)", }, { "name": "Number", "scope": "constant.numeric", "foreground": "hsl(270, 100%, 70%)", "font_style": "bold", }, |
︙
Modified misc/grammalecte.sublime-syntax from [9e26acd942] to [d99bbc135a].
︙
166 167 168 169 170 171 172 | - match: '(@)([^@\s¬]*)' scope: string.morph captures: 1: entity.valid 2: string.morph.regex | | > > > > > > | 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 | - match: '(@)([^@\s¬]*)' scope: string.morph captures: 1: entity.valid 2: string.morph.regex - match: '(\$)([^\s¬]*)' scope: string.morph captures: 1: entity.valid 2: string.morph.regex - match: '(&)([^\s¬]*)' scope: string.morph captures: 1: entity.valid 2: string.mt.morph.regex - match: '(/)[\w-]+' scope: string.tag captures: 1: entity.valid - match: '(?<=[^\w])([*][a-zA-Z0-9_]+)' scope: string.morph |
︙