Comparison of two versions of the Grammalecte graphspell tokenizer (JavaScript).

Old version (top of the file):

// JavaScript
// Very simple tokenizer
/*jslint esversion: 6*/
/*global require,exports*/
"use strict";
if (typeof(require) !== 'undefined') {
    var helpers = require("resource://grammalecte/graphspell/helpers.js");
}
const aTkzPatterns = {
    // All regexps must start with ^.
    "default":
        [
            [/^[ \t]+/, 'SPACE'],
            [/^\/(?:~|bin|boot|dev|etc|home|lib|mnt|opt|root|sbin|tmp|usr|var|Bureau|Documents|Images|Musique|Public|Téléchargements|Vidéos)(?:\/[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_.()-]+)*/, 'FOLDERUNIX'],

New version (the conditional require of graphspell/helpers.js is dropped):

// JavaScript
// Very simple tokenizer
/*jslint esversion: 6*/
/*global require,exports*/
"use strict";
const aTkzPatterns = {
    // All regexps must start with ^.
    "default":
        [
            [/^[ \t]+/, 'SPACE'],
            [/^\/(?:~|bin|boot|dev|etc|home|lib|mnt|opt|root|sbin|tmp|usr|var|Bureau|Documents|Images|Musique|Public|Téléchargements|Vidéos)(?:\/[a-zA-Zà-öÀ-Ö0-9ø-ÿØ-ßĀ-ʯfi-st_.()-]+)*/, 'FOLDERUNIX'],
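
Every rule in this table is a [regexp, token type] pair, and the comment above states the key constraint: each regexp is anchored with ^, so a rule can only match at the very start of the remaining text, and the first rule that matches wins. A minimal standalone sketch of that behaviour, using a simplified, hypothetical stand-in for the FOLDERUNIX pattern rather than the full one:

// Standalone sketch, not part of the file: zFolderUnixDemo is a simplified
// stand-in for the FOLDERUNIX regexp above.
const zFolderUnixDemo = /^\/(?:home|tmp|usr|var)(?:\/[a-zA-Z0-9_.()-]+)*/;
console.log(zFolderUnixDemo.exec("/home/nadia/notes.txt and more")[0]);
// -> "/home/nadia/notes.txt"
console.log(zFolderUnixDemo.exec("see /home/nadia"));
// -> null: the ^ anchor forbids a match that does not start at position 0,
//    which is what lets the tokenizer consume its input strictly left to right.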

Old version (second hunk, inside the Tokenizer class):

        this.sLang = sLang;
        if (!aTkzPatterns.hasOwnProperty(sLang)) {
            this.sLang = "default";
        }
        this.aRules = aTkzPatterns[this.sLang];
    }

    * genTokens (sText) {
        let m;
        let iNext = 0;
        while (sText) {
            let iCut = 1;
            let iToken = 0;
            for (let [zRegex, sType] of this.aRules) {
                try {
                    if ((m = zRegex.exec(sText)) !== null) {
                        iToken += 1;
                        yield { "i": iToken, "sType": sType, "sValue": m[0], "nStart": iNext, "nEnd": iNext + m[0].length };
                        iCut = m[0].length;
                        break;
                    }
                }
                catch (e) {
                    helpers.logerror(e);
                }
            }
            iNext += iCut;
            sText = sText.slice(iCut);
        }
    }
}

if (typeof(exports) !== 'undefined') {
    exports.Tokenizer = Tokenizer;
}
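
A brief usage sketch of this older API (assumptions: the Tokenizer class above is in scope, the full pattern table, elided from this extract, also contains word and punctuation rules, and the language key is illustrative; unknown keys fall back to "default", as the constructor shows):

// Usage sketch, old signature (illustrative input; assumes the full rule set).
const oTokenizer = new Tokenizer("fr");
for (let oToken of oTokenizer.genTokens("cd /home/nadia/Documents")) {
    console.log(`${oToken.nStart}-${oToken.nEnd}\t${oToken.sType}\t${oToken.sValue}`);
}
// Only spans matched by some rule produce a token; a character that no rule
// matches is skipped silently (iCut stays 1 and nothing is yielded).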

New version (genTokens gains the bStartEndToken and bWithSpaces parameters, emits optional <start>/<end> INFO tokens, and logs errors with console.error instead of helpers.logerror):

        this.sLang = sLang;
        if (!aTkzPatterns.hasOwnProperty(sLang)) {
            this.sLang = "default";
        }
        this.aRules = aTkzPatterns[this.sLang];
    }

    * genTokens (sText, bStartEndToken=false, bWithSpaces=false) {
        let m;
        let iToken = 0;             // running token index, also used for the final <end> token
        let iNext = 0;
        let iEnd = sText.length;
        if (bStartEndToken) {
            yield { "i": 0, "sType": "INFO", "sValue": "<start>", "nStart": 0, "nEnd": 0, "lMorph": ["<start>"] };
        }
        while (sText) {
            let iCut = 1;
            for (let [zRegex, sType] of this.aRules) {
                // unless bWithSpaces is set, the SPACE rule is skipped and whitespace is consumed silently
                if (sType !== "SPACE" || bWithSpaces) {
                    try {
                        if ((m = zRegex.exec(sText)) !== null) {
                            iToken += 1;
                            yield { "i": iToken, "sType": sType, "sValue": m[0], "nStart": iNext, "nEnd": iNext + m[0].length };
                            iCut = m[0].length;
                            break;
                        }
                    }
                    catch (e) {
                        console.error(e);
                    }
                }
            }
            iNext += iCut;          // advance by the match length, or by one character if no rule matched
            sText = sText.slice(iCut);
        }
        if (bStartEndToken) {
            yield { "i": iToken+1, "sType": "INFO", "sValue": "<end>", "nStart": iEnd, "nEnd": iEnd, "lMorph": ["<end>"] };
        }
    }
}

if (typeof(exports) !== 'undefined') {
    exports.Tokenizer = Tokenizer;
}
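
And the matching sketch for the new signature (same assumptions as above): bStartEndToken wraps the stream in <start>/<end> INFO tokens, and bWithSpaces decides whether the SPACE rule is tried at all.

// Usage sketch, new signature (illustrative input; assumes the full rule set).
const oTokenizer = new Tokenizer("default");
const lTokens = [...oTokenizer.genTokens("cd /home/nadia", true, true)];
console.log(lTokens[0].sValue);                    // "<start>"
console.log(lTokens[lTokens.length-1].sValue);     // "<end>"
// With bWithSpaces left false (the default), the SPACE rule is never tried,
// so whitespace is consumed silently instead of yielding SPACE tokens.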