Overview
Comment: | [graphspell] fix lexicographer |
Downloads: | Tarball | ZIP archive | SQL archive |
Timelines: | family | ancestors | descendants | both | trunk | graphspell |
Files: | files | file ages | folders |
SHA3-256: | 6587085132ec6fb23465963330d59edd |
User & Date: | olr on 2020-11-20 17:11:18 |
Other Links: | manifest | tags |
Context
2020-11-20
17:12 | [fx] lexicographer: small UI update (check-in: af453db0ee, user: olr, tags: trunk, fx)
17:11 | [graphspell] fix lexicographer (check-in: 6587085132, user: olr, tags: trunk, graphspell)
16:45 | [graphspell][fr] lexicographer: update tags [fr] new tag (check-in: e4ab0c2fc7, user: olr, tags: trunk, fr, graphspell)
Changes
Modified graphspell-js/spellchecker.js from [09462696fb] to [9161247046].
︙

Old (lines 173-183):

        return this.lexicographer.formatTags(sMorph);
    }

    setLabelsOnToken (oToken) {
        if (!this.lexicographer) {
            return;
        }
        if (!oToken.hasOwnProperty("lMorph")) {
            oToken["lMorph"] = this.getMorph(oToken["sValue"]);
        }
        if (oToken["sType"].startsWith("WORD")) {

New (lines 173-193):

        return this.lexicographer.formatTags(sMorph);
    }

    setLabelsOnToken (oToken) {
        if (!this.lexicographer) {
            return;
        }
        if (oToken["sType"].startsWith("WORD")) {
            oToken["bValidToken"] = (oToken.hasOwnProperty("lMorph")) ? true : this.isValidToken(oToken["sValue"]);
        }
        if (!oToken.hasOwnProperty("lMorph")) {
            oToken["lMorph"] = this.getMorph(oToken["sValue"]);
        }
        if (oToken["sType"].startsWith("WORD")) {
            let [sPrefix, sStem, sSuffix] = this.lexicographer.split(oToken["sValue"]);
            if (sStem != oToken["sValue"]) {
                oToken["lSubTokens"] = [
                    { "sType": "WORD", "sValue": sPrefix, "lMorph": this.getMorph(sPrefix) },
                    { "sType": "WORD", "sValue": sStem, "lMorph": this.getMorph(sStem) },
                    { "sType": "WORD", "sValue": sSuffix, "lMorph": this.getMorph(sSuffix) }
                ];

︙
Modified graphspell/spellchecker.py from [9b47d651ea] to [d4b9b2fec8].
︙

Old (lines 140-149):

        - aLabels: list of labels (human readable meaning of tags)
        for WORD tokens:
        - bValidToken: True if the token is valid for the spellchecker
        - lSubTokens for each parts of the split token
        """
        if not self.lexicographer:
            return
        if "lMorph" not in dToken:
            dToken["lMorph"] = self.getMorph(dToken["sValue"])
        if dToken["sType"].startswith("WORD"):

New (lines 140-158):

        - aLabels: list of labels (human readable meaning of tags)
        for WORD tokens:
        - bValidToken: True if the token is valid for the spellchecker
        - lSubTokens for each parts of the split token
        """
        if not self.lexicographer:
            return
        if dToken["sType"].startswith("WORD"):
            dToken["bValidToken"] = True if "lMorph" in dToken else self.isValidToken(dToken["sValue"])
        if "lMorph" not in dToken:
            dToken["lMorph"] = self.getMorph(dToken["sValue"])
        if dToken["sType"].startswith("WORD"):
            sPrefix, sStem, sSuffix = self.lexicographer.split(dToken["sValue"])
            if sStem != dToken["sValue"]:
                dToken["lSubTokens"] = [
                    { "sType": "WORD", "sValue": sPrefix, "lMorph": self.getMorph(sPrefix) },
                    { "sType": "WORD", "sValue": sStem, "lMorph": self.getMorph(sStem) },
                    { "sType": "WORD", "sValue": sSuffix, "lMorph": self.getMorph(sSuffix) }
                ]

︙
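Both hunks make the same change: bValidToken is now set for WORD tokens before the morphology lookup (and set to True immediately when lMorph is already present), and lSubTokens is attached when the lexicographer splits the token. Below is a minimal, standalone Python sketch of that control flow; the stub helpers isValidToken, getMorph and split and the sample token are hypothetical stand-ins for illustration, not graphspell's real implementations.

    # Standalone sketch of the new setLabelsOnToken() flow from this check-in.
    # Only the control flow mirrors the commit; the helpers below are stubs.

    def isValidToken(sValue):
        # stub: pretend every word is known to the spellchecker
        return True

    def getMorph(sValue):
        # stub: return an empty morphology list
        return []

    def split(sValue):
        # stub: no prefix/stem/suffix splitting performed
        return "", sValue, ""

    def setLabelsOnToken(dToken):
        if dToken["sType"].startswith("WORD"):
            # validity is decided first: already-analyzed tokens are trusted
            dToken["bValidToken"] = True if "lMorph" in dToken else isValidToken(dToken["sValue"])
        if "lMorph" not in dToken:
            dToken["lMorph"] = getMorph(dToken["sValue"])
        if dToken["sType"].startswith("WORD"):
            sPrefix, sStem, sSuffix = split(dToken["sValue"])
            if sStem != dToken["sValue"]:
                # the token was split: each part gets its own sub-token
                dToken["lSubTokens"] = [
                    { "sType": "WORD", "sValue": sPrefix, "lMorph": getMorph(sPrefix) },
                    { "sType": "WORD", "sValue": sStem, "lMorph": getMorph(sStem) },
                    { "sType": "WORD", "sValue": sSuffix, "lMorph": getMorph(sSuffix) }
                ]

    dToken = {"sType": "WORD", "sValue": "lexicographe"}
    setLabelsOnToken(dToken)
    print(dToken)  # bValidToken and lMorph are set; no lSubTokens since nothing was split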