# ======== BEFORE (first hunk) ========

def prepareFunction (sCode):
    "convert simple rule syntax to a string of Python code"
    if sCode[0:1] == "=":
        sCode = sCode[1:]
    sCode = sCode.replace("__also__", "bCondMemo")
    sCode = sCode.replace("__else__", "not bCondMemo")
    sCode = sCode.replace("sContext", "_sAppContext")
    sCode = re.sub(r"(morph|morphVC|analyse|value|tag|displayInfo)[(]\\(\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode)
    sCode = re.sub(r"(morph|morphVC|analyse|value|tag|displayInfo)[(]\\-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    sCode = re.sub(r"(select|exclude|define|define_from)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode)
    sCode = re.sub(r"(select|exclude|define|define_from)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    sCode = re.sub(r"(tag_before|tag_after)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2], dTags', sCode)
    sCode = re.sub(r"(tag_before|tag_after)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1], dTags', sCode)
    sCode = re.sub(r"space_after[(][\\](\d+)", 'g_space_between_tokens(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"space_after[(][\\]-(\d+)", 'g_space_between_tokens(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"analyse_with_next[(][\\](\d+)", 'g_merged_analyse(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"analyse_with_next[(][\\]-(\d+)", 'g_merged_analyse(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"(morph|analyse|tag|value)\(>1", 'g_\\1(lToken[nLastToken+1]', sCode)                   # next token
    sCode = re.sub(r"(morph|analyse|tag|value)\(<1", 'g_\\1(lToken[nTokenOffset]', sCode)                   # previous token
    sCode = re.sub(r"(morph|analyse|tag|value)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', sCode)    # next token
    sCode = re.sub(r"(morph|analyse|tag|value)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', sCode)    # previous token
    sCode = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', sCode)
    sCode = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', sCode)          # before(sCode)
    sCode = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode)                 # after(sCode)
    sCode = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', sCode)        # before0(sCode)
    sCode = re.sub(r"\bafter0\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode)                # after0(sCode)
    sCode = re.sub(r"analyseWord[(]", 'analyse(', sCode)
    sCode = re.sub(r"[\\](\d+)", 'lToken[nTokenOffset+\\1]["sValue"]', sCode)
    sCode = re.sub(r"[\\]-(\d+)", 'lToken[nLastToken-\\1+1]["sValue"]', sCode)
    sCode = re.sub(r">1", 'lToken[nLastToken+1]["sValue"]', sCode)
    sCode = re.sub(r"<1", 'lToken[nTokenOffset]["sValue"]', sCode)
    return sCode

def genTokenLines (sTokenLine, dDef):
    "tokenize a string and return a list of lines of tokens"
    lToken = sTokenLine.split()
    lTokenLines = []
    for sToken in lToken:
        # replace merger characters by spaces
        if "␣" in sToken:
            sToken = sToken.replace("␣", " ")
        # optional token?
        bNullPossible = sToken.startswith("?") and sToken.endswith("¿")
        if bNullPossible:
            sToken = sToken[1:-1]
        # token with definition?
        if sToken.startswith("({") and sToken.endswith("})") and sToken[1:-1] in dDef:
            sToken = "(" + dDef[sToken[1:-1]] + ")"
        elif sToken.startswith("{") and sToken.endswith("}") and sToken in dDef:
            sToken = dDef[sToken]
        if ( (sToken.startswith("[") and sToken.endswith("]")) or (sToken.startswith("([") and sToken.endswith("])")) ):
            # multiple token
            bSelectedGroup = sToken.startswith("(") and sToken.endswith(")")
            if bSelectedGroup:
                sToken = sToken[1:-1]
            lNewToken = sToken[1:-1].split("|")
            if not lTokenLines:
                lTokenLines = [ ["("+s+")"] for s in lNewToken ] if bSelectedGroup else [ [s] for s in lNewToken ]
                if bNullPossible:
                    lTokenLines.extend([ [] for i in range(len(lNewToken)+1) ])
            else:
                lNewTemp = []
                if bNullPossible:
                    for aRule in lTokenLines:
                        for sElem in lNewToken:
                            aNewRule = list(aRule)
                            aNewRule.append(sElem)
                            lNewTemp.append(aNewRule)
                else:
                    sElem1 = lNewToken.pop(0)
                    for aRule in lTokenLines:
                        for sElem in lNewToken:
                            aNewRule = list(aRule)
                            aNewRule.append("(" + sElem + ")" if bSelectedGroup else sElem)
                            lNewTemp.append(aNewRule)
                        aRule.append("(" + sElem1 + ")" if bSelectedGroup else sElem1)
                lTokenLines.extend(lNewTemp)
        else:
            # simple token
            if not lTokenLines:
                lTokenLines = [[sToken], []] if bNullPossible else [[sToken]]
            else:
                if bNullPossible:
                    lNewTemp = []
                    for aRule in lTokenLines:
                        lNew = list(aRule)
                        lNew.append(sToken)
                        lNewTemp.append(lNew)
                    lTokenLines.extend(lNewTemp)
                else:
                    for aRule in lTokenLines:
                        aRule.append(sToken)
    for aRule in lTokenLines:
        yield aRule

def createRule (iLine, sRuleName, sTokenLine, iActionBlock, sActions, nPriority, dOptPriority, dDef):
    "generator: create rule as list"
    # print(iLine, "//", sRuleName, "//", sTokenLine, "//", sActions, "//", nPriority)
    for lToken in genTokenLines(sTokenLine, dDef):
        # Calculate positions
        dPos = {}   # key: iGroup, value: iToken
        iGroup = 0
        #if iLine == 15818: # debug
        #    print(" ".join(lToken))
        for i, sToken in enumerate(lToken):
            if sToken.startswith("(") and sToken.endswith(")"):
                lToken[i] = sToken[1:-1]
                iGroup += 1
                dPos[iGroup] = i + 1    # we add 1, for we count tokens from 1 to n (not from 0)
        # Parse actions
        for iAction, sAction in enumerate(sActions.split(" <<- ")):
            sAction = sAction.strip()
            if sAction:
                sActionId = sRuleName + "__b" + str(iActionBlock) + "_a" + str(iAction)
                aAction = createAction(sActionId, sAction, nPriority, dOptPriority, len(lToken), dPos)
                if aAction:
                    sActionName = storeAction(sActionId, aAction)
                    lResult = list(lToken)
                    lResult.extend(["##"+str(iLine), sActionName])
                    #if iLine == 13341:
                    #    print(" ".join(lToken))
                    #    print(sActionId, aAction)
                    yield lResult
                else:
                    print(" # Error on action at line:", iLine)
                    print(sTokenLine, "\n", sActions)

def changeReferenceToken (sText, dPos):
    "change group reference in <sText> with values in <dPos>"
    if "\\" not in sText:
        return sText
    for i in range(len(dPos), 0, -1):
        sText = sText.replace("\\"+str(i), "\\"+str(dPos[i]))
    return sText

# ======== AFTER (first hunk) ========

def prepareFunction (sCode):
    "convert simple rule syntax to a string of Python code"
    if sCode[0:1] == "=":
        sCode = sCode[1:]
    sCode = sCode.replace("__also__", "bCondMemo")
    sCode = sCode.replace("__else__", "not bCondMemo")
    sCode = sCode.replace("sContext", "_sAppContext")
    sCode = re.sub(r"\b(morph|morphVC|analyse|value|tag|displayInfo)[(]\\(\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode)
    sCode = re.sub(r"\b(morph|morphVC|analyse|value|tag|displayInfo)[(]\\-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    sCode = re.sub(r"\b(select|exclude|define|define_from|add_morph|change_meta)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2]', sCode)
    sCode = re.sub(r"\b(select|exclude|define|define_from|add_morph|change_meta)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1]', sCode)
    sCode = re.sub(r"\b(tag_before|tag_after)[(][\\](\d+)", 'g_\\1(lToken[nTokenOffset+\\2], dTags', sCode)
    sCode = re.sub(r"\b(tag_before|tag_after)[(][\\]-(\d+)", 'g_\\1(lToken[nLastToken-\\2+1], dTags', sCode)
    sCode = re.sub(r"\bspace_after[(][\\](\d+)", 'g_space_between_tokens(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"\bspace_after[(][\\]-(\d+)", 'g_space_between_tokens(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"\banalyse_with_next[(][\\](\d+)", 'g_merged_analyse(lToken[nTokenOffset+\\1], lToken[nTokenOffset+\\1+1]', sCode)
    sCode = re.sub(r"\banalyse_with_next[(][\\]-(\d+)", 'g_merged_analyse(lToken[nLastToken-\\1+1], lToken[nLastToken-\\1+2]', sCode)
    sCode = re.sub(r"\b(morph|analyse|tag|value)\(>1", 'g_\\1(lToken[nLastToken+1]', sCode)                 # next token
    sCode = re.sub(r"\b(morph|analyse|tag|value)\(<1", 'g_\\1(lToken[nTokenOffset]', sCode)                 # previous token
    sCode = re.sub(r"\b(morph|analyse|tag|value)\(>(\d+)", 'g_\\1(g_token(lToken, nLastToken+\\2)', sCode)  # next token
    sCode = re.sub(r"\b(morph|analyse|tag|value)\(<(\d+)", 'g_\\1(g_token(lToken, nTokenOffset+1-\\2)', sCode)  # previous token
    sCode = re.sub(r"\bspell *[(]", '_oSpellChecker.isValid(', sCode)
    sCode = re.sub(r"\bbefore\(\s*", 'look(sSentence[:lToken[1+nTokenOffset]["nStart"]], ', sCode)          # before(sCode)
    sCode = re.sub(r"\bafter\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode)                 # after(sCode)
    sCode = re.sub(r"\bbefore0\(\s*", 'look(sSentence0[:lToken[1+nTokenOffset]["nStart"]], ', sCode)        # before0(sCode)
    sCode = re.sub(r"\bafter0\(\s*", 'look(sSentence[lToken[nLastToken]["nEnd"]:], ', sCode)                # after0(sCode)
    sCode = re.sub(r"\banalyseWord[(]", 'analyse(', sCode)
    sCode = re.sub(r"[\\](\d+)", 'lToken[nTokenOffset+\\1]["sValue"]', sCode)
    sCode = re.sub(r"[\\]-(\d+)", 'lToken[nLastToken-\\1+1]["sValue"]', sCode)
    sCode = re.sub(r">1", 'lToken[nLastToken+1]["sValue"]', sCode)
    sCode = re.sub(r"<1", 'lToken[nTokenOffset]["sValue"]', sCode)
    return sCode
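
# Illustrative sketch (not part of the original file): what prepareFunction
# produces for a typical condition. The \b word boundaries added in this
# version keep the rewriting from firing inside longer identifiers.
#
#   >>> prepareFunction('=morph(\\2, ":V") and __also__')
#   'g_morph(lToken[nTokenOffset+2], ":V") and bCondMemo'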

def genTokenLines (sTokenLine, dDef, dDecl):
    "generator: tokenize a string and yield lines of tokens (lists)"
    lTokenLines = []
    for sTokBlock in sTokenLine.split():
        # replace merger characters by spaces
        if "␣" in sTokBlock:
            sTokBlock = sTokBlock.replace("␣", " ")
        # optional token?
        bNullPossible = sTokBlock.startswith("?") and sTokBlock.endswith("¿")
        if bNullPossible:
            sTokBlock = sTokBlock[1:-1]
        # token with definition?
        if sTokBlock.startswith("({") and sTokBlock.endswith("})") and sTokBlock[1:-1] in dDef:
            sTokBlock = "(" + dDef[sTokBlock[1:-1]] + ")"
        elif sTokBlock.startswith("{") and sTokBlock.endswith("}") and sTokBlock in dDef:
            sTokBlock = dDef[sTokBlock]
        if ( (sTokBlock.startswith("[") and sTokBlock.endswith("]")) or (sTokBlock.startswith("([") and sTokBlock.endswith("])")) ):
            # multiple token
            bSelectedGroup = sTokBlock.startswith("(") and sTokBlock.endswith(")")
            if bSelectedGroup:
                sTokBlock = sTokBlock[1:-1]
            lToken = createTokenList(sTokBlock, dDecl)
            if not lTokenLines:
                lTokenLines = [ ["("+s+")"] for s in lToken ] if bSelectedGroup else [ [s] for s in lToken ]
                if bNullPossible:
                    lTokenLines.extend([ [] for i in range(len(lToken)+1) ])
            else:
                lNewTemp = []
                if bNullPossible:
                    for aRule in lTokenLines:
                        for sElem in lToken:
                            aNewRule = list(aRule)
                            aNewRule.append(sElem)
                            lNewTemp.append(aNewRule)
                else:
                    sElem1 = lToken.pop(0)
                    for aRule in lTokenLines:
                        for sElem in lToken:
                            aNewRule = list(aRule)
                            aNewRule.append("(" + sElem + ")" if bSelectedGroup else sElem)
                            lNewTemp.append(aNewRule)
                        aRule.append("(" + sElem1 + ")" if bSelectedGroup else sElem1)
                lTokenLines.extend(lNewTemp)
        else:
            # simple token
            if not lTokenLines:
                lTokenLines = [[sTokBlock], []] if bNullPossible else [[sTokBlock]]
            else:
                if bNullPossible:
                    lNewTemp = []
                    for aRule in lTokenLines:
                        lNew = list(aRule)
                        lNew.append(sTokBlock)
                        lNewTemp.append(lNew)
                    lTokenLines.extend(lNewTemp)
                else:
                    for aRule in lTokenLines:
                        aRule.append(sTokBlock)
    for aRule in lTokenLines:
        yield aRule
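
# Illustrative sketch (not part of the original file): how one token line
# multiplies out. "?le¿" is optional and "[chat|chien]" is an alternation, so
# four token lines are generated (dDef and dDecl empty, tokens hypothetical):
#
#   >>> list(genTokenLines("?le¿ [chat|chien]", {}, {}))
#   [['le', 'chat'], ['chat'], ['le', 'chien'], ['chien']]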

def createTokenList (sTokBlock, dDeclensions):
    "return a list of tokens from a block of tokens"
    lToken = []
    for sToken in sTokBlock[1:-1].split("|"):
        if "+" in sToken and not sToken.startswith("+"):
            for sCode in dDeclensions:
                if sToken.endswith(sCode):
                    sToken = sToken[:-len(sCode)]
                    lToken.append(sToken)
                    for sSuffix in dDeclensions[sCode]:
                        lToken.append(sToken+sSuffix)
                    break
        else:
            lToken.append(sToken)
    return lToken
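
# Illustrative sketch (not part of the original file): suffix expansion driven
# by dDeclensions. The key "+es" and its suffix list are hypothetical values,
# chosen only to show the mechanism:
#
#   >>> createTokenList("[grand+es|petit]", {"+es": ["e", "es"]})
#   ['grand', 'grande', 'grandes', 'petit']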

def createRule (iLine, sRuleName, sTokenLine, iActionBlock, sActions, nPriority, dOptPriority, dDef, dDecl):
    "generator: create rule as list"
    # print(iLine, "//", sRuleName, "//", sTokenLine, "//", sActions, "//", nPriority)
    if sTokenLine.startswith("!!") and sTokenLine.endswith("¡¡"):
        # antipattern
        sTokenLine = sTokenLine[2:-2].strip()
        if sRuleName not in dANTIPATTERNS:
            dANTIPATTERNS[sRuleName] = []
        for lToken in genTokenLines(sTokenLine, dDef, dDecl):
            dANTIPATTERNS[sRuleName].append(lToken)
    else:
        # pattern
        for lToken in genTokenLines(sTokenLine, dDef, dDecl):
            if sRuleName in dANTIPATTERNS and lToken in dANTIPATTERNS[sRuleName]:
                # <lToken> matches an antipattern -> discard
                continue
            # Calculate positions
            dPos = {}   # key: iGroup, value: iToken
            iGroup = 0
            #if iLine == 15818: # debug
            #    print(" ".join(lToken))
            for i, sToken in enumerate(lToken):
                if sToken.startswith("(") and sToken.endswith(")"):
                    lToken[i] = sToken[1:-1]
                    iGroup += 1
                    dPos[iGroup] = i + 1    # we add 1, for we count tokens from 1 to n (not from 0)
            # Parse actions
            for iAction, sAction in enumerate(sActions.split(" <<- ")):
                sAction = sAction.strip()
                if sAction:
                    sActionId = sRuleName + "__b" + str(iActionBlock) + "_a" + str(iAction)
                    aAction = createAction(sActionId, sAction, nPriority, dOptPriority, len(lToken), dPos)
                    if aAction:
                        sActionName = storeAction(sActionId, aAction)
                        lResult = list(lToken)
                        lResult.extend(["##"+str(iLine), sActionName])
                        #if iLine == 13341:
                        #    print(" ".join(lToken))
                        #    print(sActionId, aAction)
                        yield lResult
                    else:
                        print(" # Error on action at line:", iLine)
                        print(sTokenLine, "\n", sActions)
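
# Illustrative note (not part of the original file): an antipattern line such as
#   !! le chat ¡¡
# (tokens hypothetical) is expanded by genTokenLines and stored above in
# dANTIPATTERNS[sRuleName]; the pattern branch then discards any generated
# token line equal to one of the stored lines.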

def changeReferenceToken (sText, dPos):
    "change group reference in <sText> with values in <dPos>"
    if "\\" not in sText:
        return sText
    for i in range(len(dPos), 0, -1):
        sText = sText.replace("\\"+str(i), "\\"+str(dPos[i]))
    return sText

# ======== BEFORE (second hunk, inside createAction) ========

    checkTokenNumbers(sAction, sActionId, nToken)
    if cAction == ">":
        ## no action, break loop if condition is False
        return [sOption, sCondition, cAction, ""]
    if not sAction and cAction != "!":
        print("# Error in action at line " + sActionId + ": This action is empty.")
    if sAction[0:1] != "=" and cAction != "=":
        checkIfThereIsCode(sAction, sActionId)
    if cAction == "-":
        ## error detected --> suggestion
        if sAction[0:1] == "=":
            sAction = createFunction("sugg", sAction, True)
        elif sAction.startswith('"') and sAction.endswith('"'):
            sAction = sAction[1:-1]
        if not sMsg:
            print("# Error in action at line " + sActionId + ": The message is empty.")
        return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction, cStartLimit, cEndLimit, bCaseSensitivity, nPriority, sMsg, sURL]
    if cAction == "~":
        ## text processor
        if sAction[0:1] == "=":
            sAction = createFunction("tp", sAction, True)
        elif sAction.startswith('"') and sAction.endswith('"'):
            sAction = sAction[1:-1]
        return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction, bCaseSensitivity]
    if cAction in "!/":
        ## tags
        return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction]
    if cAction == "=":
        ## disambiguator
        if "define(" in sAction and not re.search(r"define\(\\-?\d+ *, *\[.*\] *\)", sAction):
            print("# Error in action at line " + sActionId + ": second argument for <define> must be a list of strings")
        sAction = createFunction("da", sAction)
        return [sOption, sCondition, cAction, sAction]
    print(" # Unknown action.", sActionId)
    return None

def make (lRule, dDef, sLang, dOptPriority):
    "compile rules, returns a dictionary of values"
    # for clarity purpose, don’t create any file here
    # removing comments, zeroing empty lines, creating definitions, storing tests, merging rule lines
    print(" parsing rules...")
    lTokenLine = []
    sActions = ""

# ======== AFTER (second hunk, inside createAction) ========

    checkTokenNumbers(sAction, sActionId, nToken)
    if cAction == ">":
        ## no action, break loop if condition is False
        return [sOption, sCondition, cAction, ""]
    if not sAction and cAction != "!":
        print("\n# Error in action at line <" + sActionId + ">: This action is empty.")
    if sAction[0:1] != "=" and cAction != "=":
        checkIfThereIsCode(sAction, sActionId)
    if cAction == "-":
        ## error detected --> suggestion
        if sAction[0:1] == "=":
            sAction = createFunction("sugg", sAction, True)
        elif sAction.startswith('"') and sAction.endswith('"'):
            sAction = sAction[1:-1]
        if not sMsg:
            print("\n# Error in action at line <" + sActionId + ">: The message is empty.")
        return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction, cStartLimit, cEndLimit, bCaseSensitivity, nPriority, sMsg, sURL]
    if cAction == "~":
        ## text processor
        if sAction[0:1] == "=":
            sAction = createFunction("tp", sAction, True)
        elif sAction.startswith('"') and sAction.endswith('"'):
            sAction = sAction[1:-1]
        elif sAction not in "␣*_":
            nToken = sAction.count("|") + 1
            if iStartAction > 0 and iEndAction > 0:
                if (iEndAction - iStartAction + 1) != nToken:
                    print("\n# Error in action at line <" + sActionId + ">: the number of replacement tokens does not match the number of tokens rewritten.")
            elif iStartAction < 0 or iEndAction < 0 and iStartAction != iEndAction:
                print("\n# Warning in action at line <" + sActionId + ">: rewriting may shift token positions.")
        return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction, bCaseSensitivity]
    if cAction in "!/":
        ## tags
        return [sOption, sCondition, cAction, sAction, iStartAction, iEndAction]
    if cAction == "=":
        ## disambiguator
        if "define(" in sAction and not re.search(r"define\(\\-?\d+ *, *\[.*\] *\)", sAction):
            print("\n# Error in action at line <" + sActionId + ">: second argument for <define> must be a list of strings")
        sAction = createFunction("da", sAction)
        return [sOption, sCondition, cAction, sAction]
    print("\n# Unknown action.", sActionId)
    return None
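
# Illustrative sketch (not part of the original file): the token-count check
# added above in the "~" branch. With iStartAction=1 and iEndAction=3, a bare
# rewriting action a|b yields nToken == 2 for a three-token span, so the error
# is printed; a|b|c would pass.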

def make (lRule, sLang, dDef, dDecl, dOptPriority):
    "compile rules, returns a dictionary of values"
    # for clarity purpose, don’t create any file here
    # removing comments, zeroing empty lines, creating definitions, storing tests, merging rule lines
    print(" parsing rules...")
    lTokenLine = []
    sActions = ""

# ======== BEFORE (third hunk, inside make) ========

    # processing rules
    print(" preparing rules...")
    for sGraphName, lRuleLine in dAllGraph.items():
        print("{:>8,} rules in {:<24} ".format(len(lRuleLine), "<"+sGraphName+">"), end="")
        lPreparedRule = []
        for i, sRuleGroup, sTokenLine, iActionBlock, sActions, nPriority in lRuleLine:
            for aRule in createRule(i, sRuleGroup, sTokenLine, iActionBlock, sActions, nPriority, dOptPriority, dDef):
                lPreparedRule.append(aRule)
        # Graph creation
        oDARG = darg.DARG(lPreparedRule, sLang)
        dAllGraph[sGraphName] = oDARG.createGraph()
        # Debugging
        if False:
            print("\nRULES:")

# ======== AFTER (third hunk, inside make) ========

    # processing rules
    print(" preparing rules...")
    for sGraphName, lRuleLine in dAllGraph.items():
        print("{:>8,} rules in {:<24} ".format(len(lRuleLine), "<"+sGraphName+">"), end="")
        lPreparedRule = []
        for i, sRuleGroup, sTokenLine, iActionBlock, sActions, nPriority in lRuleLine:
            for aRule in createRule(i, sRuleGroup, sTokenLine, iActionBlock, sActions, nPriority, dOptPriority, dDef, dDecl):
                lPreparedRule.append(aRule)
        # Graph creation
        oDARG = darg.DARG(lPreparedRule, sLang)
        dAllGraph[sGraphName] = oDARG.createGraph()
        # Debugging
        if False:
            print("\nRULES:")