Overview
| Comment: | [core] code cleaning (pylint) |
|---|---|
| Downloads: | Tarball · ZIP archive · SQL archive |
| Timelines: | family · ancestors · descendants · both · core · rg |
| Files: | files · file ages · folders |
| SHA3-256: | e052fe47270676f0b3f2f74f3bb04887 |
| User & Date: | olr on 2018-06-24 15:48:44 |
| Other Links: | branch diff · manifest · tags |
Context
2018-06-24

| 16:19 | [build] make.py: code cleaning (pylint) check-in: 6839c99323 user: olr tags: build, rg |
| 15:48 | [core] code cleaning (pylint) check-in: e052fe4727 user: olr tags: core, rg |
| 14:49 | [core] code cleaning (pylint) check-in: f1726135a3 user: olr tags: core, rg |
Changes
Modified gc_core/py/lang_core/gc_options.py from [49f53da949] to [c84731594a].
"""
Grammar checker default options
"""
# generated code, do not edit
def getUI (sLang):
"returns dictionary of UI labels"
if sLang in _dOptLabel:
return _dOptLabel[sLang]
return _dOptLabel["fr"]
def getOptions (sContext="Python"):
"returns dictionary of options"
if sContext in dOpt:
return dOpt[sContext]
return dOpt["Python"]
lStructOpt = ${lStructOpt}
| ︙ | ︙ |
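For context, a minimal usage sketch of the two accessors touched above. This is not part of the check-in; the import path and the "Writer" context name are assumptions (gc_options.py is generated per language from this template, so `${lStructOpt}` is filled in at build time).

```python
# Minimal usage sketch, not part of the check-in.
# Assumption: the generated module is importable as grammalecte.fr.gc_options.
from grammalecte.fr import gc_options

dLabels = gc_options.getUI("fr")             # UI labels; unknown languages fall back to "fr"
dOptions = gc_options.getOptions("Writer")   # option set; unknown contexts fall back to "Python"
print(sorted(dOptions.keys())[:5])
```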
Modified gc_core/py/text.py from [9c937ea8e6] to [137c7cc30f].
| ︙ | ︙ |
return ""
lGrammErrs = sorted(aGrammErrs, key=lambda d: d["nStart"])
lSpellErrs = sorted(aSpellErrs, key=lambda d: d['nStart'])
sText = ""
nOffset = 0
for sLine in wrap(sParagraph, nWidth): # textwrap.wrap(sParagraph, nWidth, drop_whitespace=False)
sText += sLine + "\n"
nLineLen = len(sLine)
sErrLine = ""
nLenErrLine = 0
nGrammErr = 0
nSpellErr = 0
for dErr in lGrammErrs:
nStart = dErr["nStart"] - nOffset
if nStart < nLineLen:
nGrammErr += 1
if nStart >= nLenErrLine:
sErrLine += " " * (nStart - nLenErrLine) + "^" * (dErr["nEnd"] - dErr["nStart"])
nLenErrLine = len(sErrLine)
else:
break
for dErr in lSpellErrs:
nStart = dErr['nStart'] - nOffset
if nStart < nLineLen:
nSpellErr += 1
nEnd = dErr['nEnd'] - nOffset
if nEnd > len(sErrLine):
sErrLine += " " * (nEnd - len(sErrLine))
sErrLine = sErrLine[:nStart] + "°" * (nEnd - nStart) + sErrLine[nEnd:]
else:
break
if sErrLine:
sText += sErrLine + "\n"
if nGrammErr:
sText += getReadableErrors(lGrammErrs[:nGrammErr], nWidth)
del lGrammErrs[0:nGrammErr]
if nSpellErr:
sText += getReadableErrors(lSpellErrs[:nSpellErr], nWidth, True)
del lSpellErrs[0:nSpellErr]
nOffset += nLineLen
return sText
def getReadableErrors (lErrs, nWidth, bSpell=False):
"Returns lErrs errors as readable errors"
sErrors = ""
for dErr in lErrs:
| ︙ | ︙ |
    return sErrors


def getReadableError (dErr, bSpell=False):
    "Returns an error dErr as a readable error"
    try:
        if bSpell:
            sText = u"* {nStart}:{nEnd} # {sValue}:".format(**dErr)
        else:
            sText = u"* {nStart}:{nEnd} # {sLineId} / {sRuleId}:\n".format(**dErr)
        sText += " " + dErr.get("sMessage", "# error : message not found")
        if dErr.get("aSuggestions", None):
            sText += "\n > Suggestions : " + " | ".join(dErr.get("aSuggestions", "# error : suggestions not found"))
        if dErr.get("URL", None):
            sText += "\n > URL: " + dErr["URL"]
        return sText
    except KeyError:
        return u"* Non-compliant error: {}".format(dErr)


def createParagraphWithLines (lLine):
    "Returns a text as merged lines and a set of data about lines (line_number_y, start_x, end_x)"
    sText = ""
| ︙ | ︙ |
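As an illustration of the error-dictionary shape that getReadableError() reads above, here is a small sketch; the key names come from the function, the values are invented.

```python
# Illustration only: a fabricated grammar-error dictionary using the keys
# that getReadableError() reads above (all values are invented).
dErr = {
    "nStart": 12, "nEnd": 17,
    "sLineId": "#1042", "sRuleId": "exemple_regle",
    "sMessage": "Accord douteux.",
    "aSuggestions": ["exemples"],
}
print(getReadableError(dErr))
# roughly: "* 12:17 # #1042 / exemple_regle:" on the first line,
# then the message, then a "> Suggestions : exemples" line.
```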
Modified gc_lang/fr/modules/conj.py from [0febaa97ab] to [258383e97f].
| ︙ | ︙ |
        self._sRawInfo = getVtyp(sVerbPattern)
        self.sInfo = self._readableInfo()
        self.bProWithEn = (self._sRawInfo[5] == "e")
        self._tTags = _getTags(sVerbPattern)
        if not self._tTags:
            raise ValueError("Unknown verb.")
        self._tTagsAux = _getTags(self.sVerbAux)
        self.cGroup = self._sRawInfo[0]
        self.dConj = {
            ":Y": {
                "label": "Infinitif",
                ":": sVerb,
            },
            ":P": {
                "label": "Participe présent",
| ︙ | ︙ |
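The excerpt above is from the constructor of the verb-conjugation object in conj.py. A hedged sketch of how the new cGroup attribute would be read follows; the class name Verb, its signature, and the import path are assumptions, since they are not shown in this hunk.

```python
# Hedged sketch, not part of the check-in. Assumptions: the class is
# conj.Verb with signature Verb(sVerb, sVerbPattern=""), importable as
# grammalecte.fr.conj.
from grammalecte.fr import conj

oVerb = conj.Verb("chanter")        # raises ValueError("Unknown verb.") when no tags are found
print(oVerb.cGroup)                 # conjugation group, taken from the raw verb info
print(oVerb.dConj[":Y"]["label"])   # "Infinitif"
```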
Modified gc_lang/fr/modules/cregex.py from [f9eb3691d3] to [4b9e99ff72].
| ︙ | ︙ |
    if mbMas(l1) and not mbMas(l2):
        return False
    if mbFem(l1) and not mbFem(l2):
        return False
    return True

def checkConjVerb (lMorph, sReqConj):
    "returns True if <sReqConj> in <lMorph>"
    return any(sReqConj in s for s in lMorph)

def getGender (lMorph):
    "returns gender of word (':m', ':f', ':e' or empty string)."
    sGender = ""
    for sMorph in lMorph:
        m = Gender.search(sMorph)
| ︙ | ︙ |
def mbNomAdj (lMorph):
    "returns True if one morphology is “nom” or “adjectif”"
    return any(NA.search(s) for s in lMorph)

def mbNomNotAdj (lMorph):
    "returns True if one morphology is “nom”, but not “adjectif”"
    bResult = False
    for s in lMorph:
        if ":A" in s:
            return False
        if ":N" in s:
            bResult = True
    return bResult

def mbPpasNomNotAdj (lMorph):
    "returns True if one morphology is “nom” or “participe passé”, but not “adjectif”"
    return any(PNnotA.search(s) for s in lMorph)

def mbVconj (lMorph):
    "returns True if one morphology is “nom” or “verbe conjugué”"
| ︙ | ︙ |
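To make the morphology predicates above concrete, here is a small sketch with fabricated morphology strings; the ":V1", ":N", ":A" tag layout follows what the functions test, but the sample values themselves are invented.

```python
# Illustration only: fabricated morphology strings in the ":tag" style these
# predicates inspect (real strings come from the Grammalecte dictionary).
lMorph = [">chanter/:V1_it_q_zz", ">chant/:N:m:s"]

print(checkConjVerb(lMorph, ":V1"))   # True: one morphology contains ":V1"
print(mbNomNotAdj(lMorph))            # True: ":N" is present and no ":A" occurs
print(mbNomAdj(lMorph))               # expected True: the NA regex targets noun/adjective tags
```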
Modified gc_lang/fr/modules/lexicographe.py from [a9b878f391] to [175c38852d].
| ︙ | ︙ |
"m'en": " (me) pronom personnel objet + (en) pronom adverbial",
"t'en": " (te) pronom personnel objet + (en) pronom adverbial",
"s'en": " (se) pronom personnel objet + (en) pronom adverbial",
}
class Lexicographe:
"Lexicographer - word analyzer"
def __init__ (self, oSpellChecker):
self.oSpellChecker = oSpellChecker
self._zElidedPrefix = re.compile("(?i)^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)")
self._zCompoundWord = re.compile("(?i)(\\w+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$")
self._zTag = re.compile("[:;/][\\w*][^:;/]*")
def analyzeWord (self, sWord):
"returns a tuple (a list of morphologies, a set of verb at infinitive form)"
try:
if not sWord:
return (None, None)
if sWord.count("-") > 4:
return (["élément complexe indéterminé"], None)
if sWord.isdigit():
return (["nombre"], None)
| ︙ | ︙ |
                aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
            else:
                aMorph.append( "{} : inconnu du dictionnaire".format(sWord) )
            # suffixe d’un mot composé
            if m2:
                aMorph.append( "-{} : {}".format(m2.group(2), self._formatSuffix(m2.group(2).lower())) )
            # Verbes
            aVerb = set([ s[1:s.find("/")] for s in lMorph if ":V" in s ])
            return (aMorph, aVerb)
        except:
            traceback.print_exc()
            return (["#erreur"], None)

    def formatTags (self, sTags):
        "returns string: readable tags"
        sRes = ""
        sTags = re.sub("(?<=V[1-3])[itpqnmr_eaxz]+", "", sTags)
        sTags = re.sub("(?<=V0[ea])[itpqnmr_eaxz]+", "", sTags)
        for m in self._zTag.finditer(sTags):
            sRes += _dTAGS.get(m.group(0), " [{}]".format(m.group(0)))[0]
        if sRes.startswith(" verbe") and not sRes.endswith("infinitif"):
            sRes += " [{}]".format(sTags[1:sTags.find(" ")])
| ︙ | ︙ |
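Finally, a hedged sketch of how the Lexicographe class above is typically driven; the SpellChecker import path and constructor argument are assumptions, and analyzeWord() returns the (morphologies, verbs) tuple described in its new docstring.

```python
# Hedged sketch, not part of the check-in. Assumption: the spell checker
# lives in the graphspell package and takes a language code.
from grammalecte.graphspell.spellchecker import SpellChecker

oSpellChecker = SpellChecker("fr")
oLexicographe = Lexicographe(oSpellChecker)

aMorph, aVerb = oLexicographe.analyzeWord("chanterons-nous")
# aMorph: readable morphologies for "chanterons" and the "-nous" suffix
# aVerb: set of infinitives found among the ":V" morphologies, e.g. {"chanter"}
```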