Grammalecte Check-in [eef32ad83f]

Overview
Comment: [fx] lexicographer: count paragraphs analyzed
SHA3-256: eef32ad83f8e38eba2e0b95c15e1a52bb982cf6c6f63f3a39d1c8e20463bbe86
User & Date: olr on 2017-08-15 08:36:29
Context
2017-08-15
08:41 [fx] movable panel height depending on window height (check-in: 75f1b73645, user: olr, tags: fx, webext2)
08:36 [fx] lexicographer: count paragraphs analyzed (check-in: eef32ad83f, user: olr, tags: fx, webext2)
08:12 [core][js][py] text.getParagraph(): end of line handling (check-in: ec919db910, user: olr, tags: core, webext2)
Changes

Modified gc_lang/fr/webext/content_scripts/content_panels.css from [8844c08781] to [87d3446a4a].

 .grammalecte_lxg_list_of_tokens {
     margin: 10px 0;
     padding: 10px;
     background-color: hsla(0, 0%, 96%, 1);
     border-radius: 2px;
 }
-
+.grammalecte_lxg_list_of_tokens .num {
+    float: right;
+    margin: -12px 0 5px 10px;
+    padding: 5px 10px;
+    font-weight: bold;
+    border-radius: 0 0 4px 4px;
+    background-color: hsl(0, 50%, 50%);
+    color: hsl(0, 10%, 96%);
+}
 .grammalecte_token  {
     padding: 4px 0;
 }
 .grammalecte_token .separator {
     margin: 20px 0;
     padding: 5px 5px;
     background-color: hsla(0, 0%, 75%, 1);
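
The new .num rule styles the numbering badge that lxg_content.js (diff below) prepends to each list of tokens. A minimal sketch of the markup shape it targets, written with plain DOM calls; the extension itself builds these nodes with its own createNode() helper, and the value "1" is only illustrative:

// Sketch of the markup styled by ".grammalecte_lxg_list_of_tokens .num".
let xList = document.createElement("div");
xList.className = "grammalecte_lxg_list_of_tokens";
let xNum = document.createElement("div");
xNum.className = "num";
xNum.textContent = "1";               // paragraph counter value
xList.appendChild(xNum);              // badge floats at the top right of the list
document.body.appendChild(xList);     // token nodes are appended after the badge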

Modified gc_lang/fr/webext/content_scripts/lxg_content.js from [196128af47] to [7b81590ebf].

 // JavaScript

 "use strict";

 const oLxgPanelContent = {

     _xContentNode: createNode("div", {id: "grammalecte_lxg_panel_content"}),

+    _nCount: 0,
+
     getNode: function () {
         return this._xContentNode;
     },

     clear: function () {
+        this._nCount = 0;
         while (this._xContentNode.firstChild) {
             this._xContentNode.removeChild(this._xContentNode.firstChild);
         }
     },

     addSeparator: function (sText) {
         if (this._xContentNode.textContent !== "") {
             this._xContentNode.appendChild(createNode("div", {className: "grammalecte_lxg_separator", textContent: sText}));
         }
     },

     addMessage: function (sClass, sText) {
         this._xContentNode.appendChild(createNode("div", {className: sClass, textContent: sText}));
     },

     addListOfTokens: function (lTokens) {
         try {
             if (lTokens) {
+                this._nCount += 1;
                 let xNodeDiv = createNode("div", {className: "grammalecte_lxg_list_of_tokens"});
+                xNodeDiv.appendChild(createNode("div", {className: "num", textContent: this._nCount}));
                 for (let oToken of lTokens) {
                     xNodeDiv.appendChild(this._createTokenNode(oToken));
                 }
                 this._xContentNode.appendChild(xNodeDiv);
             }
         }
         catch (e) {

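The panel now keeps a running counter: clear() resets _nCount to 0, and each call to addListOfTokens() increments it and prepends a "num" badge to the list it builds. A minimal usage sketch, assuming the extension's createNode() helper is loaded; empty arrays are passed here only to exercise the counter, while the real calls receive the per-paragraph token info returned by the worker:

oLxgPanelContent.clear();                  // counter back to 0
oLxgPanelContent.addListOfTokens([]);      // first list rendered with badge "1"
oLxgPanelContent.addListOfTokens([]);      // second list rendered with badge "2"
document.body.appendChild(oLxgPanelContent.getNode());
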
Modified gc_lang/fr/webext/gce_worker.js from [64558676c7] to [6354516b05].

 // Lexicographer

 function getListOfTokens (sText, dInfo={}) {
     try {
         for (let sParagraph of text.getParagraph(sText)) {
-            let aElem = [];
-            let aRes = null;
-            for (let oToken of oTokenizer.genTokens(sParagraph)) {
-                aRes = oLxg.getInfoForToken(oToken);
-                if (aRes) {
-                    aElem.push(aRes);
-                }
-            }
-            postMessage(createResponse("getListOfTokens", aElem, dInfo, false));
+            if (sParagraph.trim() !== "") {
+                let aElem = [];
+                let aRes = null;
+                for (let oToken of oTokenizer.genTokens(sParagraph)) {
+                    aRes = oLxg.getInfoForToken(oToken);
+                    if (aRes) {
+                        aElem.push(aRes);
+                    }
+                }
+                postMessage(createResponse("getListOfTokens", aElem, dInfo, false));
+            }
         }
         postMessage(createResponse("getListOfTokens", null, dInfo, true));
     }
     catch (e) {
         helpers.logerror(e);
         postMessage(createResponse("getListOfTokens", createErrorResult(e, "no tokens"), dInfo, true, true));
     }
 }
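
With the added guard, blank paragraphs no longer produce a getListOfTokens response, so the panel's counter only reflects paragraphs that actually contain text. A standalone sketch of that effect; genParagraphsDemo() is a hypothetical stand-in for text.getParagraph(), assumed here to yield one paragraph per input line:

// Standalone illustration of the trim() guard (hypothetical splitter).
function* genParagraphsDemo (sText) {
    yield* sText.split("\n");
}

let nAnalyzed = 0;
for (let sParagraph of genParagraphsDemo("First paragraph.\n\nSecond paragraph.")) {
    if (sParagraph.trim() !== "") {
        nAnalyzed += 1;                // only non-empty paragraphs are analyzed
    }
}
console.log(nAnalyzed);                // 2, the number the panel badge reaches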