| id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (1 class) | extension (14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (12 classes) | repo_extraction_date (433 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 6,800 | dictdlib.py | ilius_pyglossary/pyglossary/plugin_lib/dictdlib.py |
# -*- coding: utf-8 -*-
#
# Dictionary creation library
# Copyright (C) 2002 John Goerzen <jgoerzen@complete.org>
# Copyright (C) 2020 Saeed Rasooli
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from __future__ import annotations
import gzip
import os
import string
import sys
import typing
if typing.TYPE_CHECKING:
import io
from collections.abc import Iterable
__all__ = ["DictDB"]
b64_list = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
url_headword = "00-database-url"
short_headword = "00-database-short"
info_headword = "00-database-info"
validdict = set(
string.ascii_letters + string.digits + " \t",
)
def b64_encode(val: int) -> str:
"""
Takes as input an integer val and returns a string of it encoded
with the base64 algorithm used by dict indexes.
"""
startfound = 0
retval = ""
for i in range(5, -1, -1):
thispart = (val >> (6 * i)) & ((2**6) - 1)
if (not startfound) and (not thispart):
# Both zero -- keep going.
continue
startfound = 1
retval += b64_list[thispart]
if retval:
return retval
return b64_list[0]
def b64_decode(text: str) -> int:
"""
Takes as input a string and returns an integer value of it decoded
with the base64 algorithm used by dict indexes.
"""
if not text:
return 0
retval = 0
shiftval = 0
for i in range(len(text) - 1, -1, -1):
val = b64_list.index(text[i])
retval |= val << shiftval
shiftval += 6
return retval
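# Illustrative round-trip of the index base64 helpers above (a sketch; the
# values were worked out by hand from b64_list and are not part of the
# original module):
#
#   b64_encode(0)    == "A"     # all six digits are zero, so only b64_list[0]
#   b64_encode(26)   == "a"     # single digit 26 -> b64_list[26]
#   b64_encode(65)   == "BB"    # 65 == 1*64 + 1 -> two digits, both 1
#   b64_decode("BB") == 65      # inverse of the above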
def sortNormalize(inp: str) -> str:
"""
Returns a value such that inp is mapped to a format that sorts properly
with standard comparison.
"""
st2 = ""
for char in inp:
if char in validdict:
st2 += char
return st2.upper() + "\0" + inp.upper()
def sortKey(x: str) -> list[str]:
"""Emulate sort -df."""
return x.split("\0")
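# Example of how sortNormalize/sortKey emulate "sort -df" (dictionary order,
# folded case); an illustrative sketch, not part of the original module:
#
#   sortNormalize("Foo-Bar")          == "FOOBAR\x00FOO-BAR"  # '-' is dropped
#   sortKey("FOOBAR\x00FOO-BAR")      == ["FOOBAR", "FOO-BAR"]
#
# Sorting the normalized strings therefore compares the folded form first and
# falls back to the original (upper-cased) form only to break ties.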
class DictDB:
def __init__(
self,
basename: str,
mode: str = "read",
quiet: int = 0,
) -> None:
# url = 'unknown', shortname = 'unknown',
# longinfo = 'unknown', quiet = 0):
"""
Initialize a DictDB object.
Mode must be one of:
read -- read-only access
write -- write-only access, truncates existing files, does not work
with .dz. dict created if nonexistent.
update -- read/write access, dict created if nonexistent. Does not
work with .dz.
Read can read dict or dict.dz files. Write and update will NOT work
with dict.dz files.
If quiet is nonzero, status messages
will be suppressed.
"""
self.mode = mode
self.quiet = quiet
self.indexEntries: "dict[str, list[tuple[int, int]]]" = {}
# indexEntries[word] is a list of (start: int, size: int)
self.count = 0
self.basename = basename
self.indexFilename = self.basename + ".index"
if mode == "read" and os.path.isfile(self.basename + ".dict.dz"):
self.useCompression = 1
else:
self.useCompression = 0
self.dictFilename = (
self.basename + ".dict" + (".dz" if self.useCompression else "")
)
self.dictFile: "io.IOBase"
self.indexFile: "io.IOBase"
self._open(mode)
# self.writeentry(url_headword + "\n " + url, [url_headword])
# self.writeentry(short_headword + "\n " + shortname, [short_headword])
# self.writeentry(info_headword + "\n" + longinfo, [info_headword])
def _open(self, mode: str) -> None:
if mode == "read":
self.indexFile = open(self.indexFilename, "rb")
if self.useCompression:
self.dictFile = gzip.GzipFile(self.dictFilename, "rb")
else:
self.dictFile = open(self.dictFilename, "rb")
self._initIndex()
elif mode == "write":
self.indexFile = open(self.indexFilename, "wb")
if self.useCompression:
raise ValueError("'write' mode incompatible with .dz files")
self.dictFile = open(self.dictFilename, "wb")
elif mode == "update":
self._openForUpdate()
else:
raise ValueError("mode must be 'read', 'write', or 'update'")
def _openForUpdate(self) -> None:
try:
self.indexFile = open(self.indexFilename, "r+b")
except OSError:
self.indexFile = open(self.indexFilename, "w+b")
if self.useCompression:
# Open it read-only since we don't support mods.
self.dictFile = gzip.GzipFile(self.dictFilename, "rb")
else:
try:
self.dictFile = open(self.dictFilename, "r+b")
except OSError:
self.dictFile = open(self.dictFilename, "w+b")
self._initIndex()
def __len__(self) -> int:
return len(self.indexEntries)
def _initIndex(self) -> None:
"""Load the entire index off disk into memory."""
self.indexFile.seek(0)
for line in self.indexFile:
parts = line.decode("utf-8").rstrip().split("\t")
if parts[0] not in self.indexEntries:
self.indexEntries[parts[0]] = []
self.indexEntries[parts[0]].append(
(
b64_decode(parts[1]),
b64_decode(parts[2]),
),
)
def addIndexEntry(
self,
word: str,
start: int,
size: int,
) -> None:
"""
Adds an entry to the index. word is the relevant word.
start is the starting position in the dictionary and size is the
size of the definition; both are integers.
"""
if word not in self.indexEntries:
self.indexEntries[word] = []
self.indexEntries[word].append((start, size))
def deleteIndexEntry(
self,
word: str,
start: "int | None" = None,
size: "int | None" = None,
) -> int:
"""
Removes an entry from the index; word is the word to search for.
start and size are optional. If they are specified, only index
entries matching the specified values will be removed.
For instance, if word is "foo" and start and size are not specified,
all index entries for the word foo will be removed. If start and size
are specified, only those entries matching all criteria will be
removed.
This function does not actually remove the data from the .dict file.
Therefore, information removed by this function will still
exist on-disk in the .dict file, but the dict server will just
not "see" it -- there will be no way to get to it anymore.
Returns a count of the deleted entries.
"""
if word not in self.indexEntries:
return 0
retval = 0
entrylist = self.indexEntries[word]
for i in range(len(entrylist) - 1, -1, -1):
# Go backwards so the del doesn't affect the index.
if (start is None or start == entrylist[i][0]) and (
size is None or size == entrylist[i][1]
):
del entrylist[i]
retval += 1
if not entrylist: # if we emptied it, del it completely
del self.indexEntries[word]
return retval
def update(self, text: str) -> None:
"""Writes string out, if not quiet."""
if not self.quiet:
sys.stdout.write(text)
sys.stdout.flush()
def setUrl(self, url: str) -> None:
"""
Sets the URL attribute of this database. If there was
already a URL specified, we will use deleteIndexEntry() on it
first.
"""
self.deleteIndexEntry(url_headword)
self.addEntry(url_headword + "\n " + url, [url_headword])
def setShortName(self, shortname: str) -> None:
"""
Sets the shortname for this database. If there was already
a shortname specified, we will use deleteIndexEntry() on it first.
"""
self.deleteIndexEntry(short_headword)
self.addEntry(
short_headword + "\n " + shortname,
[short_headword],
)
def setLongInfo(self, longinfo: str) -> None:
"""
Sets the extended information for this database. If there was
already long info specified, we will use deleteIndexEntry() on it
first.
"""
self.deleteIndexEntry(info_headword)
self.addEntry(info_headword + "\n" + longinfo, [info_headword])
def addEntry(
self,
s_defi: str,
headwords: list[str],
) -> None:
r"""
Writes an entry. s_defi holds the content of the definition.
headwords is a list specifying one or more words under which this
definition should be indexed. This function always adds \n
to the end of s_defi.
"""
self.dictFile.seek(0, 2) # Seek to end of file
start = self.dictFile.tell()
s_defi += "\n"
b_defi = s_defi.encode("utf-8")
self.dictFile.write(b_defi)
for word in headwords:
self.addIndexEntry(word, start, len(b_defi))
self.count += 1
if self.count % 1000 == 0:
self.update(f"Processed {self.count} records\r")
def finish(self, dosort: bool = True) -> None:
"""
Called to finish the writing process.
**REQUIRED IF OPENED WITH 'update' OR 'write' MODES**.
This will write the index and close the files.
dosort is optional and defaults to true. If set to false,
dictdlib will not sort the index file. In this case, you
MUST manually sort it through "sort -df" before it can be used.
"""
self.update(f"Processed {self.count} records.\n")
if dosort:
self.update("Sorting index: converting")
indexList: list[str] = [
f"{word}\t{b64_encode(thisdef[0])}\t{b64_encode(thisdef[1])}"
for word, defs in self.indexEntries.items()
for thisdef in defs
]
self.update(" mapping")
sortmap: "dict[str, list[str]]" = {}
for entry in indexList:
norm = sortNormalize(entry)
if norm in sortmap:
sortmap[norm].append(entry)
sortmap[norm].sort(key=sortKey)
else:
sortmap[norm] = [entry]
self.update(" listing")
normalizedentries = list(sortmap)
self.update(" sorting")
normalizedentries.sort()
self.update(" re-mapping")
indexList = []
for normentry in normalizedentries:
for entry in sortmap[normentry]:
indexList.append(entry)
self.update(", done.\n")
self.update("Writing index...\n")
self.indexFile.seek(0)
for entry in indexList:
self.indexFile.write(entry.encode("utf-8") + b"\n")
if self.mode == "update":
# In case things were deleted
self.indexFile.truncate()
self.close()
self.update("Complete.\n")
def close(self) -> None:
self.indexFile.close()
self.dictFile.close()
def getDefList(self) -> Iterable[str]:
"""
Returns an iterable of strings naming all definitions contained
in this dictionary.
"""
return self.indexEntries.keys()
def hasDef(self, word: str) -> bool:
return word in self.indexEntries
def getDef(self, word: str) -> list[bytes]:
"""
Given a definition name, returns a list of bytes objects with all
matching definitions. This is an *exact* match, not a
case-insensitive one. Returns [] if word is not in the dictionary.
"""
retval: list[bytes] = []
if not self.hasDef(word):
return retval
for start, length in self.indexEntries[word]:
self.dictFile.seek(start)
retval.append(self.dictFile.read(length))
return retval
# print("------------------------ ", __name__)
if __name__ == "__main__":
db = DictDB("test")
print(db) # noqa: T201
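# Typical write-then-read usage (an illustrative sketch with hypothetical
# file names; not part of the original module):
#
#   db = DictDB("mydict", mode="write")
#   db.setShortName("My Dictionary")
#   db.addEntry("apple\n a round fruit", ["apple"])
#   db.addEntry("colour\n see color", ["colour", "color"])
#   db.finish()          # required: sorts and writes mydict.index
#
#   db = DictDB("mydict", mode="read")
#   db.getDef("apple")   # -> [b"apple\n a round fruit\n"]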
| 11,149 | Python | .py | 341 | 29.560117 | 77 | 0.698259 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,801 | ebook_length3.py | ilius_pyglossary/pyglossary/sort_modules/ebook_length3.py |
from __future__ import annotations
from typing import TYPE_CHECKING
from pyglossary.sort_modules import ebook
if TYPE_CHECKING:
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "E-Book (prefix length: 3)"
def normal(sortEncoding: str = "utf-8", **_options) -> SortKeyType:
return ebook.normal(
sortEncoding=sortEncoding,
group_by_prefix_length=3,
)
def sqlite(
sortEncoding: str = "utf-8",
**_options,
) -> SQLiteSortKeyType:
return ebook.sqlite(
sortEncoding=sortEncoding,
group_by_prefix_length=3,
)
| 544 | Python | .py | 19 | 26.368421 | 67 | 0.771318 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,802 | dicformids.py | ilius_pyglossary/pyglossary/sort_modules/dicformids.py |
from __future__ import annotations
import re
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "DictionaryForMIDs"
def normal(**_options) -> SortKeyType:
re_punc = re.compile(
r"""[!"$§%&/()=?´`\\{}\[\]^°+*~#'\-_.:,;<>@|]*""", # noqa: RUF001
)
re_spaces = re.compile(" +")
re_tabs = re.compile("\t+")
def sortKey(words: list[str]) -> str:
word = words[0]
word = word.strip()
word = re_punc.sub("", word)
word = re_spaces.sub(" ", word)
word = re_tabs.sub(" ", word)
word = word.lower()
return word # noqa: RET504
return sortKey
def sqlite(**options) -> SQLiteSortKeyType:
return [
(
"headword_norm",
"TEXT",
normal(**options),
),
]
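# Worked example of the DictionaryForMIDs sort key above (illustrative only;
# not part of the original module):
#
#   key = normal()
#   key(["Don't  Panic!"])  # -> "dont panic"
#
# Punctuation is stripped, runs of spaces and tabs collapse to a single
# space, and the result is lower-cased before comparison.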
| 753 | Python | .py | 29 | 23.206897 | 68 | 0.632911 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,803 | stardict.py | ilius_pyglossary/pyglossary/sort_modules/stardict.py |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "StarDict"
def normal(sortEncoding: str = "utf-8", **_options) -> SortKeyType:
def sortKey(words: list[str]) -> tuple[bytes, bytes]:
b_word = words[0].encode(sortEncoding, errors="replace")
return (b_word.lower(), b_word)
return sortKey
def sqlite(sortEncoding: str = "utf-8", **_options) -> SQLiteSortKeyType:
def headword_lower(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace").lower()
def headword(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace")
_type = "TEXT" if sortEncoding == "utf-8" else "BLOB"
return [
(
"headword_lower",
_type,
headword_lower,
),
(
"headword",
_type,
headword,
),
]
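# Illustrative behaviour of the StarDict sort key above (a sketch, not part
# of the original module): entries compare case-insensitively first and by
# the raw bytes second, mirroring the case-folded-then-exact ordering that
# StarDict index files expect.
#
#   key = normal()
#   key(["Apple"])  # -> (b"apple", b"Apple")
#   key(["apple"])  # -> (b"apple", b"apple")
#
# so "Apple" sorts before "apple" only via the second element of the tuple.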
| 868 | Python | .py | 28 | 28.107143 | 73 | 0.705669 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,804 | headword_lower.py | ilius_pyglossary/pyglossary/sort_modules/headword_lower.py |
from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyglossary.icu_types import T_Collator
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "Lowercase Headword"
def normal(sortEncoding: str = "utf-8", **_options) -> SortKeyType:
def sortKey(words: list[str]) -> bytes:
return words[0].lower().encode(sortEncoding, errors="replace")
return sortKey
def locale(
collator: "T_Collator", # noqa: F821
) -> SortKeyType:
cSortKey = collator.getSortKey
def sortKey(words: list[str]) -> bytes:
return cSortKey(words[0].lower())
return lambda **_options: sortKey
def sqlite(
sortEncoding: str = "utf-8",
**_options,
) -> SQLiteSortKeyType:
def sortKey(words: list[str]) -> bytes:
return words[0].lower().encode(sortEncoding, errors="replace")
return [
(
"headword_lower",
"TEXT" if sortEncoding == "utf-8" else "BLOB",
sortKey,
),
]
def sqlite_locale(
collator: "T_Collator", # noqa: F821
) -> Callable[..., SQLiteSortKeyType]:
cSortKey = collator.getSortKey
def sortKey(words: list[str]) -> bytes:
return cSortKey(words[0].lower())
return lambda **_options: [("sortkey", "BLOB", sortKey)]
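# Minimal usage sketch for the locale-aware variant above (assumes PyICU is
# installed; illustrative only, not part of the original module):
#
#   from icu import Collator, Locale
#   collator = Collator.createInstance(Locale("en"))
#   sortKey = locale(collator)()      # factory takes **options, returns the key
#   sortKey(["Résumé"])               # -> collator sort-key bytes for "résumé"
#
# The collator's binary sort keys compare correctly with a plain bytes
# comparison, which is what the SQLite BLOB column relies on.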
| 1,233 | Python | .py | 38 | 29.947368 | 67 | 0.727891 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,805 | headword_bytes_lower.py | ilius_pyglossary/pyglossary/sort_modules/headword_bytes_lower.py |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "ASCII-Lowercase Headword"
def normal(
sortEncoding: str = "utf-8",
**_options,
) -> SortKeyType:
def sortKey(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace").lower()
return sortKey
# def locale(
# collator: "T_Collator", # noqa: F821
# ) -> SortKeyType:
# raise NotImplementedError("")
def sqlite(sortEncoding: str = "utf-8", **_options) -> SQLiteSortKeyType:
def sortKey(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace").lower()
return [
(
"headword_blower",
"TEXT" if sortEncoding == "utf-8" else "BLOB",
sortKey,
),
]
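# The difference from headword_lower is that bytes.lower() only folds ASCII
# A-Z, while str.lower() folds the full Unicode range. Illustrative sketch
# (not part of the original module):
#
#   normal()(["Äpfel"])   # -> b"\xc3\x84pfel"  (this module: 'Ä' untouched)
#   # headword_lower would give b"\xc3\xa4pfel" ("Äpfel".lower() == "äpfel")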
| 782 | Python | .py | 26 | 27.653846 | 73 | 0.716398 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,806 | ebook.py | ilius_pyglossary/pyglossary/sort_modules/ebook.py |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
__all__ = ["normal", "sqlite"]
desc = "E-Book (prefix length: 2)"
def normal(
sortEncoding: str = "utf-8", # noqa: ARG001
**options,
) -> SortKeyType:
length = options.get("group_by_prefix_length", 2)
# FIXME: return bytes
def sortKey(words: list[str]) -> tuple[str, str]:
word = words[0]
if not word:
return "", ""
prefix = word[:length].lower()
if prefix[0] < "a":
return "SPECIAL", word
return prefix, word
return sortKey
def sqlite(sortEncoding: str = "utf-8", **options) -> SQLiteSortKeyType:
length = options.get("group_by_prefix_length", 2)
def getPrefix(words: list[str]) -> str:
word = words[0]
if not word:
return ""
prefix = word[:length].lower()
if prefix[0] < "a":
return "SPECIAL"
return prefix
def headword(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace")
_type = "TEXT" if sortEncoding == "utf-8" else "BLOB"
return [
(
"prefix",
_type,
getPrefix,
),
(
"headword",
_type,
headword,
),
]
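# Illustrative grouping behaviour of the e-book sort key above (a sketch,
# not part of the original module):
#
#   key = normal()                 # default prefix length 2
#   key(["apple"])   # -> ("ap", "apple")
#   key(["Zebra"])   # -> ("ze", "Zebra")
#   key(["1984"])    # -> ("SPECIAL", "1984")   # prefix starts before "a"
#   key([""])        # -> ("", "")
#
# Headwords are therefore ordered by their lower-cased prefix group first,
# so that writers can split the index into per-prefix groups.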
| 1,171 | Python | .py | 46 | 22.565217 | 72 | 0.671467 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,807 | headword.py | ilius_pyglossary/pyglossary/sort_modules/headword.py |
from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyglossary.icu_types import T_Collator
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "Headword"
def normal(sortEncoding: str = "utf-8", **_options) -> SortKeyType:
def sortKey(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace")
return sortKey
def locale(
collator: "T_Collator", # noqa: F821
) -> SortKeyType:
cSortKey = collator.getSortKey
def sortKey(words: list[str]) -> bytes:
return cSortKey(words[0])
return lambda **_options: sortKey
def sqlite(sortEncoding: str = "utf-8", **_options) -> SQLiteSortKeyType:
def sortKey(words: list[str]) -> bytes:
return words[0].encode(sortEncoding, errors="replace")
return [
(
"headword",
"TEXT" if sortEncoding == "utf-8" else "BLOB",
sortKey,
),
]
def sqlite_locale(
collator: "T_Collator", # noqa: F821
) -> Callable[..., SQLiteSortKeyType]:
cSortKey = collator.getSortKey
def sortKey(words: list[str]) -> bytes:
return cSortKey(words[0])
return lambda **_options: [("sortkey", "BLOB", sortKey)]
| 1,180 | Python | .py | 35 | 31.142857 | 73 | 0.730018 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,808 | random.py | ilius_pyglossary/pyglossary/sort_modules/random.py |
from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyglossary.icu_types import T_Collator
from .sort_keys_types import SortKeyType, SQLiteSortKeyType
desc = "Random"
def normal(**_options) -> SortKeyType:
from random import random
return lambda _words: random()
def locale(
_collator: "T_Collator", # noqa: F821
) -> SortKeyType:
from random import random
return lambda **_options: lambda _words: random()
def sqlite(**_options) -> SQLiteSortKeyType:
from random import random
return [
(
"random",
"REAL",
lambda _words: random(),
),
]
def sqlite_locale(
_collator: "T_Collator", # noqa: F821
**_options,
) -> Callable[..., SQLiteSortKeyType]:
from random import random
return lambda **_opt: [
(
"random",
"REAL",
lambda _words: random(),
),
]
| 881 | Python | .py | 36 | 21.861111 | 60 | 0.722222 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,809 | iupac_goldbook.py | ilius_pyglossary/pyglossary/plugins/iupac_goldbook.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
from __future__ import annotations
from collections.abc import Iterator
from io import BytesIO
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyglossary.glossary_types import (
EntryType,
GlossaryType,
)
from pyglossary.lxml_types import Element
from pyglossary.option import Option
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import exc_note, log, pip
from pyglossary.html_utils import unescape_unicode
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "iupac_goldbook"
format = "IUPACGoldbook"
description = "IUPAC goldbook (.xml)"
extensions = ()
extensionCreate = ".xml"
singleFile = True
kind = "text"
wiki = ""
website = "https://goldbook.iupac.org/"
optionsProp: "dict[str, Option]" = {}
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file = None
self._fileSize = 0
self._termByCode = None
def __len__(self) -> int:
return 0
def close(self) -> None:
if self._file:
self._file.close()
self._file = None
self._filename = ""
self._fileSize = 0
self._termByCode = None
def open(self, filename: str) -> None:
try:
from lxml import etree as ET
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
self._filename = filename
_file = compressionOpen(filename, mode="rb")
_file.seek(0, 2)
self._fileSize = _file.tell()
_file.seek(0)
chunk = _file.read(800)
chunk_end = chunk.find(b"<entries>")
chunk = chunk[:chunk_end]
chunk += b"</vocabulary>"
infoRoot = ET.fromstring(chunk)
self.setMetadata(infoRoot)
_file.seek(0)
context = ET.iterparse(
_file,
events=("end",),
tag="entry",
)
termByCode = {}
for _, elem in context:
termE = elem.find("./term")
if termE is None:
continue
term = self.getTerm(termE)
codeE = elem.find("./code")
if codeE is None:
continue
termByCode[codeE.text] = term
self._termByCode = termByCode
_file.close()
def setGlosInfo(self, key: str, value: str) -> None:
if value is None:
return
self._glos.setInfo(key, unescape_unicode(value))
def setMetadata(self, header: Element) -> None:
if header is None:
return
title = header.find("./title")
if title:
self.setGlosInfo("name", title.text)
publisher = header.find("./publisher")
if publisher:
self.setGlosInfo("publisher", publisher.text)
isbn = header.find("./isbn")
if isbn:
self.setGlosInfo("isbn", isbn.text)
doi = header.find("./doi")
if doi:
self.setGlosInfo("doi", doi.text)
accessdate = header.find("./accessdate")
if accessdate:
self.setGlosInfo("creationTime", accessdate.text)
@staticmethod
def tostring(
elem: Element,
) -> str:
from lxml import etree as ET
return (
ET.tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
@staticmethod
def innerXML(elem: Element) -> str:
from lxml import etree as ET
elemName = elem.xpath("name(/*)")
resultStr = ""
for e in elem.xpath("/" + elemName + "/node()"):
if isinstance(e, str):
pass # resultStr += e
else:
resultStr += ET.tostring(e, encoding="unicode")
return resultStr
def getTerm(self, termE: Element) -> str: # noqa: PLR6301
from lxml import etree as ET
term = (
ET.tostring(
termE,
method="html",
pretty_print=False,
)
.decode("utf-8")
.strip()[6:-7]
.strip()
)
term = unescape_unicode(term)
term = term.replace("<i>", "").replace("</i>", "")
return term # noqa: RET504
def __iter__(self) -> Iterator[EntryType]: # noqa: PLR0912
from lxml import etree as ET
glos = self._glos
fileSize = self._fileSize
termByCode = self._termByCode
self._file = _file = compressionOpen(self._filename, mode="rb")
context = ET.iterparse(
self._file,
events=("end",),
tag="entry",
)
for _, elem in context: # noqa: PLR1702
codeE = elem.find("./code")
if codeE is None:
continue
code = codeE.text
_id = elem.attrib.get("id")
termE = elem.find("./term")
if termE is None:
log.warning(f"no term, {code=}, {_id=}")
continue
term = self.getTerm(termE)
words = []
if term:
words.append(term)
if code:
words.append(code)
# if _id is not None:
# words.append(f"id{_id}")
identifierTerm = elem.find("./identifiers/term")
if identifierTerm is not None and identifierTerm.text:
words.append(identifierTerm.text)
identifierSynonym = elem.find("./identifiers/synonym")
if identifierSynonym is not None and identifierSynonym.text:
words.append(identifierSynonym.text)
defiParts = []
definition = elem.find("./definition")
if definition is None or not definition.text:
pass
else:
defiParts.append(definition.text)
definitionEntryList = elem.findall("./definition/entry")
if definitionEntryList:
bio = BytesIO()
with ET.htmlfile(bio, encoding="utf-8") as hf:
with hf.element("ol"):
for item in definitionEntryList:
if not item.text:
continue
with hf.element("li"):
hf.write(item.text)
listHtml = bio.getvalue().decode("utf-8")
defiParts.append(listHtml)
replacedbyE = elem.find("./replacedby")
if replacedbyE is not None:
replacedby = replacedbyE.text
replacedbyCode = replacedby.split(".")[-1]
replacedbyTerm = termByCode.get(replacedbyCode)
if replacedbyTerm is None:
log.warning(f"{term}: {replacedby=}")
replacedbyTerm = replacedbyCode
defiParts.append(
f'Replaced by: <a href="bword://{replacedbyTerm}">{replacedbyTerm}</a>',
)
relatedList = elem.findall("./related/entry")
if relatedList:
relatedLinkList = []
for related in relatedList:
relatedURL = related.text
relatedCode = relatedURL.split("/")[-1]
relatedTerm = termByCode.get(relatedCode)
if not relatedTerm:
log.warning(f"{term}: {relatedURL=}")
relatedTerm = relatedCode
relatedLinkList.append(
f'<a href="bword://{relatedTerm}">{relatedTerm}</a>',
)
defiParts.append("Related: " + ", ".join(relatedLinkList))
lastupdatedE = elem.find("./lastupdated")
if lastupdatedE is not None:
defiParts.append(f"Last updated: {lastupdatedE.text}")
urlE = elem.find("./url")
if urlE is not None:
defiParts.append(f'<a href="{urlE.text}">More info.</a>')
if len(defiParts) > 1:
defiParts.insert(1, "")
try:
defi = "<br/>".join(defiParts)
except Exception:
log.error(f"{defiParts = }")
continue
yield glos.newEntry(
words,
defi,
defiFormat="h",
byteProgress=(_file.tell(), fileSize),
)
# clean up preceding siblings to save memory
# this can reduce memory usage from >300 MB to ~25 MB
parent = elem.getparent()
if parent is None:
continue
while elem.getprevious() is not None:
del parent[0]
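# The streaming pattern used in __iter__ above, shown in isolation (a minimal
# sketch with a hypothetical file name; not part of the original module):
#
#   from lxml import etree as ET
#
#   with open("goldbook.xml", "rb") as f:
#       for _, elem in ET.iterparse(f, events=("end",), tag="entry"):
#           ...  # handle one <entry> element
#           parent = elem.getparent()
#           if parent is not None:
#               while elem.getprevious() is not None:
#                   del parent[0]   # drop processed siblings to bound memory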
| 7,188 | Python | .py | 265 | 23.384906 | 77 | 0.676026 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,810 | info_plugin.py | ilius_pyglossary/pyglossary/plugins/info_plugin.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Iterator
from typing import TYPE_CHECKING
from pyglossary.info_writer import InfoWriter as Writer
if TYPE_CHECKING:
from pyglossary.glossary_types import (
EntryType,
GlossaryType,
)
from pyglossary.option import Option
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "info"
format = "Info"
description = "Glossary Info (.info)"
extensions = (".info",)
extensionCreate = ".info"
singleFile = True
kind = "text"
wiki = ""
website = None
# key is option/argument name, value is instance of Option
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
def close(self) -> None:
pass
def open(self, filename: str) -> None:
from pyglossary.json_utils import jsonToOrderedData
with open(filename, encoding="utf-8") as infoFp:
info = jsonToOrderedData(infoFp.read())
for key, value in info.items():
self._glos.setInfo(key, value)
def __len__(self) -> int:
return 0
def __iter__(self) -> Iterator[EntryType | None]:
yield None
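# Illustrative .info input for the Reader above (a hypothetical example; the
# file is read as a JSON object whose keys and values become glossary info):
#
#   {
#       "name": "My Glossary",
#       "author": "Jane Doe",
#       "description": "Example metadata-only glossary"
#   }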
| 1,269 | Python | .py | 53 | 21.830189 | 58 | 0.718802 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,811 | ayandict_sqlite.py | ilius_pyglossary/pyglossary/plugins/ayandict_sqlite.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Generator, Iterator
from typing import (
TYPE_CHECKING,
)
if TYPE_CHECKING:
import sqlite3
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.xdxf.transform import XdxfTransformer
from pyglossary.core import log
from pyglossary.option import BoolOption, Option
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "ayandict_sqlite"
format = "AyanDictSQLite"
description = "AyanDict SQLite"
extensions = ()
extensionCreate = ".db"
singleFile = True
kind = "binary"
wiki = ""
website = (
"https://github.com/ilius/ayandict",
"ilius/ayandict",
)
optionsProp: "dict[str, Option]" = {
"fuzzy": BoolOption(
comment="Create fuzzy search data",
),
}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
self._con = connect(filename)
self._cur = self._con.cursor()
self._glos.setDefaultDefiFormat("h")
self._cur.execute("SELECT key, value FROM meta;")
for row in self._cur.fetchall():
if row[0] == "hash":
continue
self._glos.setInfo(row[0], row[1])
def __len__(self) -> int:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute("select count(id) from entry")
return self._cur.fetchone()[0]
def __iter__(self) -> Iterator[EntryType]:
from json import loads
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute(
"SELECT entry.term, entry.article, "
"json_group_array(alt.term)"
"FROM entry LEFT JOIN alt ON entry.id=alt.id "
"GROUP BY entry.id;",
)
for row in self._cur.fetchall():
terms = [row[0]] + [alt for alt in loads(row[2]) if alt]
article = row[1]
yield self._glos.newEntry(terms, article, defiFormat="h")
def close(self) -> None:
if self._cur:
self._cur.close()
if self._con:
self._con.close()
self._clear()
class Writer:
_fuzzy: int = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
self._xdxfTr: "XdxfTransformer | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
con = self._con = connect(filename)
self._cur = self._con.cursor()
for query in (
"CREATE TABLE meta ('key' TEXT PRIMARY KEY NOT NULL, 'value' TEXT);",
(
"CREATE TABLE entry ('id' INTEGER PRIMARY KEY NOT NULL, "
"'term' TEXT, 'article' TEXT);"
),
"CREATE TABLE alt ('id' INTEGER NOT NULL, 'term' TEXT);",
"CREATE INDEX idx_meta ON meta(key);",
"CREATE INDEX idx_entry_term ON entry(term COLLATE NOCASE);",
"CREATE INDEX idx_alt_id ON alt(id);",
"CREATE INDEX idx_alt_term ON alt(term COLLATE NOCASE);",
):
try:
con.execute(query)
except Exception as e: # noqa: PERF203
log.error(f"query: {query}")
raise e
for key, value in self._glos.iterInfo():
con.execute(
"INSERT INTO meta (key, value) VALUES (?, ?);",
(key, value),
)
if self._fuzzy:
con.execute(
"CREATE TABLE fuzzy3 ('sub' TEXT NOT NULL, "
"'term' TEXT NOT NULL, "
"id INTEGER NOT NULL);",
)
con.execute(
"CREATE INDEX idx_fuzzy3_sub ON fuzzy3(sub COLLATE NOCASE);",
)
con.commit()
def finish(self) -> None:
if self._con is None or self._cur is None:
return
self._con.commit()
self._con.close()
self._con = None
self._cur = None
def xdxf_setup(self) -> None:
from pyglossary.xdxf.transform import XdxfTransformer
# if self._xsl:
# self._xdxfTr = XslXdxfTransformer(encoding="utf-8")
# return
self._xdxfTr = XdxfTransformer(encoding="utf-8")
def xdxf_transform(self, text: str) -> str:
if self._xdxfTr is None:
self.xdxf_setup()
return self._xdxfTr.transformByInnerString(text) # type: ignore
def write(self) -> Generator[None, EntryType, None]:
import hashlib
cur = self._cur
if cur is None:
raise ValueError("cur is None")
_hash = hashlib.md5()
while True:
entry = yield
if entry is None:
break
if entry.isData():
# can save it with entry.save(directory)
continue
defi = entry.defi
entry.detectDefiFormat()
if entry.defiFormat == "m":
if "\n" in defi:
defi = f"<pre>{defi}</pre>"
elif entry.defiFormat == "x":
defi = self.xdxf_transform(defi)
cur.execute(
"INSERT INTO entry(term, article) VALUES (?, ?);",
(entry.l_word[0], defi),
)
_id = cur.lastrowid
if _id is None:
raise ValueError("lastrowid is None")
for alt in entry.l_word[1:]:
cur.execute(
"INSERT INTO alt(id, term) VALUES (?, ?);",
(_id, alt),
)
_hash.update(entry.s_word.encode("utf-8"))
if self._fuzzy:
self.addFuzzy(_id, entry.l_word)
cur.execute(
"INSERT INTO meta (key, value) VALUES (?, ?);",
("hash", _hash.hexdigest()),
)
def addFuzzy(self, _id: int, terms: list[str]) -> None:
cur = self._cur
if cur is None:
raise ValueError("cur is None")
for term in terms:
subs: set[str] = set()
for word in term.split(" "):
eword = "\n" + word
subs.update(eword[i : i + 3] for i in range(len(eword) - 2))
for sub in subs:
cur.execute(
"INSERT INTO fuzzy3(sub, term, id) VALUES (?, ?, ?);",
(sub, term, _id),
)
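# Worked example of the trigram extraction in addFuzzy above (illustrative
# only; not part of the original module): each word is prefixed with "\n" and
# split into overlapping 3-character substrings, so
#
#   term = "red cat"
#   # word "red" -> {"\nre", "red"}
#   # word "cat" -> {"\nca", "cat"}
#
# and one fuzzy3 row is inserted per (sub, term, id) triple. A misspelled
# query can then be matched by looking up its own trigrams and ranking terms
# that share enough of them.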
| 5,764 | Python | .py | 206 | 24.635922 | 72 | 0.661717 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,812 | yomichan.py | ilius_pyglossary/pyglossary/plugins/yomichan.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
import re
from collections.abc import Generator, Sequence
from typing import TYPE_CHECKING, Any
from pyglossary import os_utils
from pyglossary.flags import ALWAYS
from pyglossary.option import (
BoolOption,
IntOption,
Option,
StrOption,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "yomichan"
format = "Yomichan"
description = "Yomichan (.zip)"
extensions = (".zip",)
extensionCreate = ".zip"
singleFile = True
sortOnWrite = ALWAYS
sortKeyName = "headword"
kind = "package"
wiki = ""
website = (
"https://foosoft.net/projects/yomichan/",
"foosoft.net",
)
optionsProp: "dict[str, Option]" = {
"term_bank_size": IntOption(
comment="The number of terms in each term bank json file.",
),
"term_from_headword_only": BoolOption(
comment=(
"If set to true, only create a term for the headword for each entry, "
"as opposed to create one term for each alternate word. "
"If the headword is ignored by the `ignore_word_with_pattern` option, "
"the next word in the alternate list that is not ignored is used as "
"headword."
),
),
"no_term_from_reading": BoolOption(
comment=(
"When there are multiple alternate words, don't create term for the "
"one that is the same as the the reading form, which is chosen to be "
"the first alternate forms that consists solely of Hiragana and "
"Katakana. "
"For example, an entry could contain both 'だいがく' and '大学' as "
"alternate words. Setting this option to true would prevent a term "
"to be created for the former."
),
),
"delete_word_pattern": StrOption(
comment=(
"When given, all non-overlapping matches of this regular expression "
"are removed from word strings. "
"For example, if an entry has word 'あま·い', setting the "
"pattern to `·` removes all center dots, or more precisely use "
"`·(?=[\\u3040-\\u309F])` to only remove center dots that precede "
"Hiragana characters. Either way, the original word is replaced "
"with 'あまい'."
),
),
"ignore_word_with_pattern": StrOption(
comment=(
"When given, don't create terms for a word if any of its substrings "
"matches this regular expression. "
"For example, an entry could contain both 'だいがく【大学】' and '大学' "
"as alternate words. Setting this option with value `r'【.+】'` would "
"prevent a term to be created for the former."
),
),
"alternates_from_word_pattern": StrOption(
comment=(
"When given, the regular expression is used to find additional "
"alternate words for the same entry from matching substrings in "
"the original words. "
"If there are no capturing groups in the regular expression, "
"then all matched substrings are added to the list of alternate "
"words. "
"If there are capturing groups, then substrings matching the groups "
"are added to the alternate words list instead. "
"For example, if an entry has 'だいがく【大学】' as a word, then "
"`\\w+(?=【)` adds 'だいがく' as an additional word, while "
"`(\\w+)【(\\w+)】` adds both 'だいがく' and '大学'."
),
),
"alternates_from_defi_pattern": StrOption(
comment=(
"When given, the regular expression is used to find additional "
"alternate words for the same entry from matching substrings in "
"the definition. `^` and `$` can be used to match start and end of "
"lines, respectively. "
"If there are no capturing groups in the regular expression, "
"then all matched substrings are added to the list of alternate "
"words. "
"If there are capturing groups, then substrings matching the groups "
"are added to the alternate words list instead. "
"For example, if an entry has 'だいがく【大学】' in its definition, then "
"`\\w+【(\\w+)】` adds '大学' as an additional word."
),
),
"rule_v1_defi_pattern": StrOption(
comment=(
"When given, if any substring of an entry's definition matches this "
"regular expression, then the term(s) created from entry are labeled "
"as ichidan verb. Yomichan uses this information to match conjugated "
"forms of words. `^` and `$` can be used to match start and end of "
"lines, respectively. "
"For example, setting this option to `^\\(動[上下]一\\)$` identifies "
"entries where there's a line of '(動上一)' or '(動下一)'."
),
),
"rule_v5_defi_pattern": StrOption(
comment=(
"When given, if any substring of an entry's definition matches this "
"regular expression, then the term(s) created from entry are labeled "
"as godan verb. Yomichan uses this information to match conjugated "
"forms of words. `^` and `$` can be used to match start and end of "
"lines, respectively. "
"For example, setting this option to `^\\(動五\\)$` identifies "
"entries where there's a line of '(動五)'."
),
),
"rule_vs_defi_pattern": StrOption(
comment=(
"When given, if any substring of an entry's definition matches this "
"regular expression, then the term(s) created from entry are labeled "
"as suru verb. Yomichan uses this information to match conjugated "
"forms of words. `^` and `$` can be used to match start and end of "
"lines, respectively. "
"For example, setting this option to `^スル$` identifies entries where "
"there's a line of 'スル'."
),
),
"rule_vk_defi_pattern": StrOption(
comment=(
"When given, if any substring of an entry's definition matches this "
"regular expression, then the term(s) created from entry are labeled "
"as kuru verb. Yomichan uses this information to match conjugated "
"forms of words. `^` and `$` can be used to match start and end of "
"lines, respectively. "
"For example, setting this option to `^\\(動カ変\\)$` identifies "
"entries where there's a line of '(動カ変)'."
),
),
"rule_adji_defi_pattern": StrOption(
comment=(
"When given, if any substring of an entry's definition matches this "
"regular expression, then the term(s) created from entry are labeled "
"as i-adjective. Yomichan uses this information to match conjugated "
"forms of words. `^` and `$` can be used to match start and end of "
"lines, respectively. "
"For example, setting this option to `r'^\\(形\\)$'` identify "
"entries where there's a line of '(形)'."
),
),
}
def _isKana(char: str) -> bool:
assert len(char) == 1
val = ord(char)
return (
0x3040 <= val <= 0x309F # Hiragana
or 0x30A0 <= val <= 0x30FF # Katakana (incl. center dot)
or 0xFF65 <= val <= 0xFF9F # Half-width Katakana (incl. center dot)
)
def _isKanji(char: str) -> bool:
assert len(char) == 1
val = ord(char)
return (
0x3400 <= val <= 0x4DBF # CJK Unified Ideographs Extension A
or 0x4E00 <= val <= 0x9FFF # CJK Unified Ideographs
or 0xF900 <= val <= 0xFAFF # CJK Compatibility Ideographs
or 0x20000 <= val <= 0x2A6DF # CJK Unified Ideographs Extension B
or 0x2A700 <= val <= 0x2B73F # CJK Unified Ideographs Extension C
or 0x2B740 <= val <= 0x2B81F # CJK Unified Ideographs Extension D
or 0x2F800 <= val <= 0x2FA1F # CJK Compatibility Ideographs Supplement
)
def _uniqueList(lst: Sequence) -> list[Any]:
seen = set()
result = []
for elem in lst:
if elem not in seen:
seen.add(elem)
result.append(elem)
return result
class Writer:
depends = {
"bs4": "beautifulsoup4",
}
_term_bank_size = 10_000
_term_from_headword_only = True
_no_term_from_reading = True
_delete_word_pattern = ""
_ignore_word_with_pattern = ""
_alternates_from_word_pattern = ""
_alternates_from_defi_pattern = ""
_rule_v1_defi_pattern = ""
_rule_v5_defi_pattern = ""
_rule_vs_defi_pattern = ""
_rule_vk_defi_pattern = ""
_rule_adji_defi_pattern = ""
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
# Yomichan technically supports "structured content" that renders to
# HTML, but it doesn't seem widely used. So here we also strip HTML
# formatting for simplicity.
glos.removeHtmlTagsAll()
def _getInfo(self, key: str) -> str:
info = self._glos.getInfo(key)
return info.replace("\n", "<br>")
def _getAuthor(self) -> str:
return self._glos.author.replace("\n", "<br>")
def _getDictionaryIndex(self) -> dict[str, Any]:
# Schema: https://github.com/FooSoft/yomichan/
# blob/master/ext/data/schemas/dictionary-index-schema.json
return {
"title": self._getInfo("title"),
"revision": "PyGlossary export",
"sequenced": True,
"format": 3,
"author": self._getAuthor(),
"url": self._getInfo("website"),
"description": self._getInfo("description"),
}
def _compileRegex(self) -> None:
for field_name in (
"_delete_word_pattern",
"_ignore_word_with_pattern",
"_alternates_from_word_pattern",
"_alternates_from_defi_pattern",
"_rule_v1_defi_pattern",
"_rule_v5_defi_pattern",
"_rule_vs_defi_pattern",
"_rule_vk_defi_pattern",
"_rule_adji_defi_pattern",
):
value = getattr(self, field_name)
if value and isinstance(value, str):
setattr(self, field_name, re.compile(value))
def _getExpressionsAndReadingFromEntry(
self,
entry: EntryType,
) -> tuple[list[str], str]:
term_expressions = list(entry.l_word)
if self._alternates_from_word_pattern:
for word in entry.l_word:
term_expressions += re.findall(
self._alternates_from_word_pattern,
word,
)
if self._alternates_from_defi_pattern:
term_expressions += re.findall(
self._alternates_from_defi_pattern,
entry.defi,
re.MULTILINE,
)
if self._delete_word_pattern:
term_expressions = [
re.sub(self._delete_word_pattern, "", expression)
for expression in term_expressions
]
if self._ignore_word_with_pattern:
term_expressions = [
expression
for expression in term_expressions
if not re.search(self._ignore_word_with_pattern, expression)
]
term_expressions = _uniqueList(term_expressions)
try:
reading = next(
expression
for expression in entry.l_word + term_expressions
if all(map(_isKana, expression))
)
except StopIteration:
reading = ""
if self._no_term_from_reading and len(term_expressions) > 1:
term_expressions = [
expression for expression in term_expressions if expression != reading
]
if self._term_from_headword_only:
term_expressions = term_expressions[:1]
return term_expressions, reading
def _getRuleIdentifiersFromEntry(self, entry: EntryType) -> list[str]:
return [
r
for p, r in (
(self._rule_v1_defi_pattern, "v1"),
(self._rule_v5_defi_pattern, "v5"),
(self._rule_vs_defi_pattern, "vs"),
(self._rule_vk_defi_pattern, "vk"),
(self._rule_adji_defi_pattern, "adj-i"),
)
if p and re.search(p, entry.defi, re.MULTILINE)
]
def _getTermsFromEntry(
self,
entry: EntryType,
sequenceNumber: int,
) -> list[list[Any]]:
termExpressions, reading = self._getExpressionsAndReadingFromEntry(entry)
ruleIdentifiers = self._getRuleIdentifiersFromEntry(entry)
# Schema: https://github.com/FooSoft/yomichan/
# blob/master/ext/data/schemas/dictionary-term-bank-v3-schema.json
return [
[
expression,
# reading only added if expression contains kanji
reading if any(map(_isKanji, expression)) else "",
"", # definition tags
" ".join(ruleIdentifiers),
0, # score
[entry.defi],
sequenceNumber,
"", # term tags
]
for expression in termExpressions
]
def open(self, filename: str) -> None:
self._filename = filename
self._glos.mergeEntriesWithSameHeadwordPlaintext()
def finish(self) -> None:
self._filename = ""
def write(self) -> Generator[None, EntryType, None]:
with os_utils.indir(self._filename, create=True):
with open("index.json", "w", encoding="utf-8") as f:
json.dump(self._getDictionaryIndex(), f, ensure_ascii=False)
entryCount = 0
termBankIndex = 0
terms: list[list[Any]] = []
def flushTerms() -> None:
nonlocal termBankIndex
if not terms:
return
with open(
f"term_bank_{termBankIndex + 1}.json",
mode="w",
encoding="utf-8",
) as _file:
json.dump(terms, _file, ensure_ascii=False)
terms.clear()
termBankIndex += 1
while True:
entry: EntryType
entry = yield
if entry is None:
break
if entry.isData():
continue
terms.extend(self._getTermsFromEntry(entry, entryCount))
entryCount += 1
if len(terms) >= self._term_bank_size:
flushTerms()
flushTerms()
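# Illustrative shape of one term-bank row produced by _getTermsFromEntry
# above (hypothetical entry; not part of the original module):
#
#   ["大学",          # expression
#    "だいがく",      # reading (added because the expression contains kanji)
#    "",              # definition tags
#    "",              # rule identifiers, e.g. "v5" for godan verbs
#    0,               # score
#    ["university"],  # glossary content (the entry definition)
#    42,              # sequence number
#    ""]              # term tags
#
# Rows are accumulated and flushed to term_bank_1.json, term_bank_2.json, ...
# once term_bank_size entries have been collected.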
| 12,771 | Python | .py | 373 | 30.407507 | 75 | 0.692598 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,813 | formats_common.py | ilius_pyglossary/pyglossary/plugins/formats_common.py |
import logging
import os
import sys
from os.path import (
exists,
isdir,
isfile,
join,
split,
splitext,
)
from pprint import pformat
from pyglossary.core import rootDir
sys.path.insert(0, rootDir) # noqa: E402
from pyglossary import core
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import (
cacheDir,
pip,
)
from pyglossary.flags import (
ALWAYS,
DEFAULT_NO,
DEFAULT_YES,
NEVER,
YesNoAlwaysNever,
)
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import (
BoolOption,
DictOption,
EncodingOption,
FileSizeOption,
FloatOption,
HtmlColorOption,
IntOption,
ListOption,
NewlineOption,
Option,
StrOption,
)
from pyglossary.os_utils import indir
from pyglossary.text_utils import toStr
log = logging.getLogger("pyglossary")
enable = False
lname = ""
format = "Unknown"
description = "Unknown"
extensions: "tuple[str, ...]" = ()
extensionCreate = ""
singleFile = False
kind = ""
wiki = ""
website = None
# key is option/argument name, value is instance of Option
optionsProp: "dict[str, Option]" = {}
sortOnWrite: YesNoAlwaysNever = DEFAULT_NO
__all__ = [
"ALWAYS",
"DEFAULT_NO",
"DEFAULT_YES",
"NEVER",
"BoolOption",
"DictOption",
"EncodingOption",
"EntryType",
"FileSizeOption",
"FloatOption",
"GlossaryType",
"HtmlColorOption",
"IntOption",
"ListOption",
"NewlineOption",
"StrOption",
"YesNoAlwaysNever",
"cacheDir",
"compressionOpen",
"core",
"description",
"enable",
"exists",
"extensionCreate",
"extensions",
"format",
"indir",
"isdir",
"isfile",
"join",
"kind",
"lname",
"log",
"logging",
"optionsProp",
"os",
"pformat",
"pip",
"rootDir",
"singleFile",
"sortOnWrite",
"split",
"splitext",
"stdCompressions",
"toStr",
"website",
"wiki",
]
| 1,806 | Python | .py | 109 | 14.825688 | 61 | 0.751924 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,814 | jmnedict.py | ilius_pyglossary/pyglossary/plugins/jmnedict.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import re
from io import BytesIO
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import io
from collections.abc import Callable, Iterator
from pyglossary.glossary_types import (
EntryType,
GlossaryType,
)
from pyglossary.lxml_types import Element, T_htmlfile
from pyglossary.option import Option
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import exc_note, pip
from pyglossary.io_utils import nullBinaryIO
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "jmnedict"
format = "JMnedict"
description = "JMnedict"
extensions = ()
extensionCreate = ""
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/JMdict"
website = (
"https://www.edrdg.org/wiki/index.php/Main_Page",
"EDRDG Wiki",
)
optionsProp: "dict[str, Option]" = {}
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
tagStyle = (
"color:white;"
"background:green;"
"padding-left:3px;"
"padding-right:3px;"
"border-radius:0.5ex;"
# 0.5ex ~= 0.3em, but "ex" is recommended
)
gikun_key = "gikun (meaning as reading) or jukujikun (special kanji reading)"
re_inf_mapping = {
gikun_key: "gikun/jukujikun",
"out-dated or obsolete kana usage": "obsolete", # outdated/obsolete
"word containing irregular kana usage": "irregular",
}
@staticmethod
def makeList(
hf: "T_htmlfile",
input_objects: list[Element],
processor: Callable,
single_prefix: str = "",
skip_single: bool = True,
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not input_objects:
return
if skip_single and len(input_objects) == 1:
hf.write(single_prefix)
processor(hf, input_objects[0])
return
with hf.element("ol"):
for el in input_objects:
with hf.element("li"):
processor(hf, el)
def writeTrans(
self,
hf: "T_htmlfile",
trans: Element,
) -> None:
from lxml import etree as ET
def br() -> Element:
return ET.Element("br")
for elem in trans.findall("name_type"):
if not elem.text:
continue
desc = elem.text
with hf.element("i"):
hf.write(f"{desc.capitalize()}")
hf.write(br())
for elem in trans.findall("trans_det"):
if not elem.text:
continue
desc = elem.text
hf.write(f"{desc}")
hf.write(br())
relatedWords = []
for elem in trans.findall("xref"):
if not elem.text:
continue
word = elem.text.strip()
word = self._link_number_postfix.sub("", word)
relatedWords.append(word)
if relatedWords:
hf.write("Related: ")
for i, word in enumerate(relatedWords):
if i > 0:
with hf.element("big"):
hf.write(" | ")
with hf.element("a", href=f"bword://{word}"):
hf.write(word)
hf.write(br())
def getEntryByElem( # noqa: PLR0912
self,
entry: Element,
) -> EntryType:
from lxml import etree as ET
glos = self._glos
keywords = []
f = BytesIO()
def br() -> Element:
return ET.Element("br")
with ET.htmlfile(f, encoding="utf-8") as hf: # noqa: PLR1702
kebList: list[str] = []
rebList: "list[tuple[str, list[str]]]" = []
with hf.element("div"):
for k_ele in entry.findall("k_ele"):
keb = k_ele.find("keb")
if keb is None:
continue
if not keb.text:
continue
kebList.append(keb.text)
keywords.append(keb.text)
# for elem in k_ele.findall("ke_pri"):
# log.info(elem.text)
for r_ele in entry.findall("r_ele"):
reb = r_ele.find("reb")
if reb is None:
continue
if not reb.text:
continue
props = []
if r_ele.find("re_nokanji") is not None:
props.append("no kanji")
inf = r_ele.find("re_inf")
if inf is not None and inf.text:
props.append(
self.re_inf_mapping.get(inf.text, inf.text),
)
rebList.append((reb.text, props))
keywords.append(reb.text)
# for elem in r_ele.findall("re_pri"):
# log.info(elem.text)
# this is for making internal links valid
# this makes too many alternates!
# but we don't seem to have a choice
# except for scanning and indexing all words once
# and then starting over and fixing/optimizing links
for s_keb in kebList:
for s_reb, _ in rebList:
keywords.append(f"{s_keb}・{s_reb}")
if kebList:
with hf.element(glos.titleTag(kebList[0])):
for i, s_keb in enumerate(kebList):
if i > 0:
with hf.element("font", color="red"):
hf.write(" | ")
hf.write(s_keb)
hf.write(br())
if rebList:
for i, (s_reb, props) in enumerate(rebList):
if i > 0:
with hf.element("font", color="red"):
hf.write(" | ")
with hf.element("font", color="green"):
hf.write(s_reb)
for prop in props:
hf.write(" ")
with hf.element("small"):
with hf.element("span", style=self.tagStyle):
hf.write(prop)
hf.write(br())
_hf = cast("T_htmlfile", hf)
self.makeList(
_hf,
entry.findall("trans"),
self.writeTrans,
)
defi = f.getvalue().decode("utf-8")
_file = self._file
byteProgress = (_file.tell(), self._fileSize)
return self._glos.newEntry(
keywords,
defi,
defiFormat="h",
byteProgress=byteProgress,
)
@staticmethod
def tostring(elem: Element) -> str:
from lxml import etree as ET
return (
ET.tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
def setCreationTime(self, header: str) -> None:
m = re.search("JMdict created: ([0-9]{4}-[0-9]{2}-[0-9]{2})", header)
if m is None:
return
self._glos.setInfo("creationTime", m.group(1))
def setMetadata(self, header: str) -> None:
# TODO: self.set_info("edition", ...)
self.setCreationTime(header)
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._wordCount = 0
self._filename = ""
self._file: "io.IOBase" = nullBinaryIO
self._fileSize = 0
self._link_number_postfix = re.compile("・[0-9]+$")
def __len__(self) -> int:
return self._wordCount
def close(self) -> None:
if self._file:
self._file.close()
self._file = nullBinaryIO
def open(
self,
filename: str,
) -> None:
try:
from lxml import etree as ET # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
self._filename = filename
self._fileSize = os.path.getsize(filename)
self._glos.sourceLangName = "Japanese"
self._glos.setDefaultDefiFormat("h")
self._glos.setInfo("definition_has_headwords", "True")
self._glos.setInfo("entry_url", "https://jisho.org/search/{word}")
# also good: f"https://sakuradict.com/search?q={{word}}"
header = ""
with compressionOpen(filename, mode="rt", encoding="utf-8") as text_file:
text_file = cast("io.TextIOBase", text_file)
for line in text_file:
if "<JMdict>" in line:
break
header += line
self.setMetadata(header)
self._file = compressionOpen(filename, mode="rb")
def __iter__(self) -> Iterator[EntryType]:
from lxml import etree as ET
context = ET.iterparse( # type: ignore # noqa: PGH003
self._file,
events=("end",),
tag="entry",
)
for _, _elem in context:
elem = cast("Element", _elem)
yield self.getEntryByElem(elem)
# clean up preceding siblings to save memory
# this reduces memory usage from ~64 MB to ~30 MB
parent = elem.getparent()
if parent is None:
continue
while elem.getprevious() is not None:
del parent[0]
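# Keyword-generation sketch for the reader above (illustrative; not part of
# the original module): an entry with kanji form 大学 and reading だいがく is
# indexed under "大学", "だいがく" and the combined form "大学・だいがく", so
# that internal cross-references written in either style resolve to the same
# entry.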
| 7,692 | Python | .py | 285 | 23.066667 | 78 | 0.656203 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,815 | quickdic6.py | ilius_pyglossary/pyglossary/plugins/quickdic6.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import datetime as dt
import functools
import gzip
import io
import math
import pathlib
import struct
import typing
import zipfile
from typing import IO, TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from collections.abc import Callable
from typing import Any, Literal
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.core import log
from pyglossary.flags import NEVER
from pyglossary.langs import langDict
from pyglossary.option import (
Option,
StrOption,
)
from pyglossary.plugin_lib import mutf8
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "quickdic6"
format = "QuickDic6"
description = "QuickDic version 6 (.quickdic)"
extensions = (".quickdic", ".quickdic.v006.zip")
extensionCreate = ".quickdic"
singleFile = True
sortOnWrite = NEVER
kind = "binary"
wiki = ""
website = (
"https://github.com/rdoeffinger/Dictionary",
"github.com/rdoeffinger/Dictionary",
)
# https://github.com/rdoeffinger/Dictionary/blob/master/dictionary-format-v6.txt
optionsProp: "dict[str, Option]" = {
"normalizer_rules": StrOption(
comment="ICU normalizer rules to use for index sorting",
),
"source_lang": StrOption(
comment="The language of the tokens in the dictionary index",
),
"target_lang": StrOption(
comment="The language of the dictionary entries",
),
}
HASH_SET_INIT = (
b"\xac\xed" # magic
b"\x00\x05" # version
b"\x73" # object
b"\x72" # class
# Java String "java.util.HashSet":
b"\x00\x11\x6a\x61\x76\x61\x2e\x75\x74\x69"
b"\x6c\x2e\x48\x61\x73\x68\x53\x65\x74"
)
"""First part of Java serialization of java.util.HashSet"""
HASH_SET_INIT2 = (
# serialization ID:
b"\xba\x44\x85\x95\x96\xb8\xb7\x34"
b"\x03" # flags: serialized, custom serialization function
b"\x00\x00" # fields count
b"\x78" # blockdata end
b"\x70" # null (superclass)
b"\x77\x0c" # blockdata short, 0xc bytes
)
"""Second part of Java serialization of java.util.HashSet"""
LINKED_HASH_SET_INIT = (
(
b"\xac\xed" # magic
b"\x00\x05" # version
b"\x73" # object
b"\x72" # class
# Java String "java.util.LinkedHashSet":
b"\x00\x17\x6a\x61\x76\x61\x2e\x75\x74\x69"
b"\x6c\x2e\x4c\x69\x6e\x6b\x65\x64"
b"\x48\x61\x73\x68\x53\x65\x74"
# serialization ID:
b"\xd8\x6c\xd7\x5a\x95\xdd\x2a\x1e"
b"\x02" # flags
b"\x00\x00" # fields count
b"\x78" # blockdata end
b"\x72" # superclass (java.util.HashSet)
b"\x00\x11\x6a\x61\x76\x61\x2e\x75\x74\x69"
b"\x6c\x2e\x48\x61\x73\x68\x53\x65\x74"
)
+ HASH_SET_INIT2
)
"""Header of Java serialization of java.util.LinkedHashSet"""
HASH_SET_CAPACITY_FACTOR = 0.75
"""Capacity factor used to determine the hash set's capacity from its length"""
default_de_normalizer_rules = (
":: Lower; 'ae' > 'ä'; 'oe' > 'ö'; 'ue' > 'ü'; 'ß' > 'ss'; "
)
default_normalizer_rules = (
":: Any-Latin; ' ' > ; "
":: Lower; :: NFD; "
":: [:Nonspacing Mark:] Remove; "
":: NFC ;"
)
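# Illustrative sketch (not part of the original file): applying the default
# normalizer rules with PyICU, mirroring Comparator.normalize further below.
# Assumes PyICU is installed; the sample word is only an example.
def _demo_normalize(word: str = "Éclair") -> str:
	import icu
	translit = icu.Transliterator.createFromRules(
		"",
		default_normalizer_rules,
		icu.UTransDirection.FORWARD,
	)
	# should yield a lower-cased, accent-stripped form such as "eclair"
	return translit.transliterate(word)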
def read_byte(fp: IO[bytes]) -> int:
return struct.unpack(">b", fp.read(1))[0]
def write_byte(fp: IO[bytes], val: int) -> int:
return fp.write(struct.pack(">b", val))
def read_bool(fp: IO[bytes]) -> bool:
return bool(read_byte(fp))
def write_bool(fp: IO[bytes], val: int) -> int:
return write_byte(fp, val)
def read_short(fp: IO[bytes]) -> int:
return struct.unpack(">h", fp.read(2))[0]
def write_short(fp: IO[bytes], val: int) -> int:
return fp.write(struct.pack(">h", val))
def read_int(fp: IO[bytes]) -> int:
return struct.unpack(">i", fp.read(4))[0]
def write_int(fp: IO[bytes], val: int) -> int:
return fp.write(struct.pack(">i", val))
def read_long(fp: IO[bytes]) -> int:
return struct.unpack(">q", fp.read(8))[0]
def write_long(fp: IO[bytes], val: int) -> int:
return fp.write(struct.pack(">q", val))
def read_float(fp: IO[bytes]) -> float:
return struct.unpack(">f", fp.read(4))[0]
def write_float(fp: IO[bytes], val: float) -> int:
return fp.write(struct.pack(">f", val))
def read_string(fp: IO[bytes]) -> str:
length = read_short(fp)
return mutf8.decode_modified_utf8(fp.read(length))
def write_string(fp: IO[bytes], val: str) -> int:
b_string = mutf8.encode_modified_utf8(val)
return write_short(fp, len(b_string)) + fp.write(b_string)
def read_hashset(fp: IO[bytes]) -> list[str]:
hash_set_init = fp.read(len(HASH_SET_INIT))
if hash_set_init == HASH_SET_INIT:
hash_set_init2 = fp.read(len(HASH_SET_INIT2))
assert hash_set_init2 == HASH_SET_INIT2
else:
n_extra = len(LINKED_HASH_SET_INIT) - len(HASH_SET_INIT)
hash_set_init += fp.read(n_extra)
assert hash_set_init == LINKED_HASH_SET_INIT
read_int(fp) # capacity
capacity_factor = read_float(fp)
assert capacity_factor == HASH_SET_CAPACITY_FACTOR
num_entries = read_int(fp)
data: list[str] = []
while len(data) < num_entries:
assert read_byte(fp) == 0x74
data.append(read_string(fp))
assert read_byte(fp) == 0x78
return data
def write_hashset(
fp: IO[bytes],
data: list[str],
linked_hash_set: bool = False,
) -> int:
write_start_offset = fp.tell()
if linked_hash_set:
fp.write(LINKED_HASH_SET_INIT)
else:
fp.write(HASH_SET_INIT + HASH_SET_INIT2)
num_entries = len(data)
capacity = (
2 ** math.ceil(math.log2(num_entries / HASH_SET_CAPACITY_FACTOR))
if num_entries > 0
else 128
)
write_int(fp, capacity)
write_float(fp, HASH_SET_CAPACITY_FACTOR)
write_int(fp, num_entries)
for string in data:
write_byte(fp, 0x74)
write_string(fp, string)
write_byte(fp, 0x78)
return fp.tell() - write_start_offset
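# Illustrative sketch (not part of the original file): round-tripping a
# (linked) hash set through an in-memory buffer with the helpers above.
def _demo_hashset_roundtrip() -> list[str]:
	buf = io.BytesIO()
	write_hashset(buf, ["a", "b", "c"], linked_hash_set=True)
	buf.seek(0)
	# should return ["a", "b", "c"]
	return read_hashset(buf)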
T = TypeVar("T")
def read_list(
fp: IO[bytes],
fun: "Callable[[IO[bytes]], T]",
) -> list[T]:
size = read_int(fp)
toc = struct.unpack(f">{size + 1}q", fp.read(8 * (size + 1)))
entries = []
for offset in toc[:-1]:
fp.seek(offset)
entries.append(fun(fp))
fp.seek(toc[-1])
return entries
def write_list(
fp: IO[bytes],
fun: "Callable[[IO[bytes], T], Any]",
entries: list[T],
) -> int:
write_start_offset = fp.tell()
size = len(entries)
write_int(fp, size)
toc_offset = fp.tell()
fp.seek(toc_offset + 8 * (size + 1))
toc = [fp.tell()]
for e in entries:
fun(fp, e)
toc.append(fp.tell())
fp.seek(toc_offset)
fp.write(struct.pack(f">{size + 1}q", *toc))
fp.seek(toc[-1])
return fp.tell() - write_start_offset
def read_entry_int(fp: IO[bytes]) -> int:
return read_int(fp)
def write_entry_int(fp: IO[bytes], entry: int) -> int:
return write_int(fp, entry)
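# Illustrative sketch (not part of the original file): the list helpers above
# store a table of contents of absolute offsets followed by the entries, so a
# round trip through an in-memory buffer preserves the values.
def _demo_list_roundtrip() -> list[int]:
	buf = io.BytesIO()
	write_list(buf, write_entry_int, [10, 20, 30])
	buf.seek(0)
	# should return [10, 20, 30]
	return read_list(buf, read_entry_int)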
def read_entry_source(fp: IO[bytes]) -> tuple[str, int]:
name = read_string(fp)
count = read_int(fp)
return name, count
def write_entry_source(fp: IO[bytes], entry: tuple[str, int]) -> int:
name, count = entry
return write_string(fp, name) + write_int(fp, count)
def read_entry_pairs(fp: IO[bytes]) -> tuple[int, list[tuple[str, str]]]:
src_idx = read_short(fp)
count = read_int(fp)
pairs = [(read_string(fp), read_string(fp)) for i in range(count)]
return src_idx, pairs
def write_entry_pairs(
fp: IO[bytes],
entry: tuple[int, list[tuple[str, str]]],
) -> int:
write_start_offset = fp.tell()
src_idx, pairs = entry
write_short(fp, src_idx)
write_int(fp, len(pairs))
for p in pairs:
write_string(fp, p[0])
write_string(fp, p[1])
return fp.tell() - write_start_offset
def read_entry_text(fp: IO[bytes]) -> tuple[int, str]:
src_idx = read_short(fp)
txt = read_string(fp)
return src_idx, txt
def write_entry_text(fp: IO[bytes], entry: tuple[int, str]) -> int:
src_idx, txt = entry
return write_short(fp, src_idx) + write_string(fp, txt)
def read_entry_html(fp: IO[bytes]) -> tuple[int, str, str]:
src_idx = read_short(fp)
title = read_string(fp)
read_int(fp) # len_raw
len_compr = read_int(fp)
b_compr = fp.read(len_compr)
with gzip.open(io.BytesIO(b_compr), "r") as zf:
# this is not modified UTF-8 (read_string), but actual UTF-8
html = zf.read().decode()
return src_idx, title, html
def write_entry_html(fp: IO[bytes], entry: tuple[int, str, str]) -> int:
write_start_offset = fp.tell()
src_idx, title, html = entry
b_html = "".join(c if ord(c) < 128 else f"&#{ord(c)};" for c in html).encode()
ib_compr = io.BytesIO()
with gzip.GzipFile(fileobj=ib_compr, mode="wb") as zf:
# note that the compressed bytes might differ from the original Java
# implementation that uses GZIPOutputStream
zf.write(b_html)
ib_compr.seek(0)
b_compr = ib_compr.read()
write_short(fp, src_idx)
write_string(fp, title)
write_int(fp, len(b_html))
write_int(fp, len(b_compr))
fp.write(b_compr)
return fp.tell() - write_start_offset
IndexEntryType = tuple[
str, # token
int, # start_index
int, # count
str, # token_norm
list[int], # html_indices
]
EntryIndexTuple = tuple[
str, # short_name
str, # long_name
str, # iso
str, # normalizer_rules
bool, # swap_flag
int, # main_token_count
list[IndexEntryType], # index_entries
list[str], # stop_list,
list[tuple[int, int]], # rows
]
def read_entry_index(fp: IO[bytes]) -> EntryIndexTuple:
short_name = read_string(fp)
long_name = read_string(fp)
iso = read_string(fp)
normalizer_rules = read_string(fp)
swap_flag = read_bool(fp)
main_token_count = read_int(fp)
index_entries = read_list(fp, read_entry_indexentry)
stop_list_size = read_int(fp)
stop_list_offset = fp.tell()
stop_list = read_hashset(fp)
assert fp.tell() == stop_list_offset + stop_list_size
num_rows = read_int(fp)
row_size = read_int(fp)
row_data = fp.read(num_rows * row_size)
rows = [
# <type>, <index>
struct.unpack(">bi", row_data[j : j + row_size])
for j in range(0, len(row_data), row_size)
]
return (
short_name,
long_name,
iso,
normalizer_rules,
swap_flag,
main_token_count,
index_entries,
stop_list,
rows,
)
def write_entry_index(
fp: IO[bytes],
entry: EntryIndexTuple,
) -> int:
write_start_offset = fp.tell()
(
short_name,
long_name,
iso,
normalizer_rules,
swap_flag,
main_token_count,
index_entries,
stop_list,
rows,
) = entry
write_string(fp, short_name)
write_string(fp, long_name)
write_string(fp, iso)
write_string(fp, normalizer_rules)
write_bool(fp, swap_flag)
write_int(fp, main_token_count)
write_list(fp, write_entry_indexentry, index_entries)
stop_list_size_offset = fp.tell()
stop_list_offset = stop_list_size_offset + write_int(fp, 0)
stop_list_size = write_hashset(fp, stop_list, linked_hash_set=True)
fp.seek(stop_list_size_offset)
write_int(fp, stop_list_size)
fp.seek(stop_list_offset + stop_list_size)
write_int(fp, len(rows))
write_int(fp, 5)
row_data = b"".join([struct.pack(">bi", t, i) for t, i in rows])
fp.write(row_data)
return fp.tell() - write_start_offset
def read_entry_indexentry(fp: IO[bytes]) -> IndexEntryType:
token = read_string(fp)
start_index = read_int(fp)
count = read_int(fp)
has_normalized = read_bool(fp)
token_norm = read_string(fp) if has_normalized else ""
html_indices = read_list(fp, read_entry_int)
return token, start_index, count, token_norm, html_indices
def write_entry_indexentry(
fp: IO[bytes],
entry: IndexEntryType,
) -> None:
token, start_index, count, token_norm, html_indices = entry
has_normalized = bool(token_norm)
write_string(fp, token)
write_int(fp, start_index)
write_int(fp, count)
write_bool(fp, has_normalized)
if has_normalized:
write_string(fp, token_norm)
write_list(fp, write_entry_int, html_indices)
class Comparator:
def __init__(self, locale_str: str, normalizer_rules: str, version: int) -> None:
import icu
self.version = version
self.locale = icu.Locale(locale_str)
self._comparator = (
icu.RuleBasedCollator("&z<ȝ")
if self.locale.getLanguage() == "en"
else icu.Collator.createInstance(self.locale)
)
self._comparator.setStrength(icu.Collator.IDENTICAL)
self.normalizer_rules = normalizer_rules
self.normalize = icu.Transliterator.createFromRules(
"",
self.normalizer_rules,
icu.UTransDirection.FORWARD,
).transliterate
def compare(
self,
tup1: "tuple[str, str]",
tup2: "tuple[str, str]",
) -> Literal[0, 1, -1]:
# assert isinstance(tup1, tuple)
# assert isinstance(tup2, tuple)
s1, n1 = tup1
s2, n2 = tup2
cn = self._compare_without_dash(n1, n2)
if cn != 0:
return cn
cn = self._comparator.compare(n1, n2)
if cn != 0 or self.version < 7:
return cn
return self._comparator.compare(s1, s2)
def _compare_without_dash(
self,
a: str,
b: str,
) -> Literal[0, 1, -1]:
if self.version < 7:
return 0
s1 = self._without_dash(a)
s2 = self._without_dash(b)
return self._comparator.compare(s1, s2)
@staticmethod
def _without_dash(a: str) -> str:
return a.replace("-", "").replace("þ", "th").replace("Þ", "Th")
class QuickDic:
def __init__( # noqa: PLR0913
self,
name: str,
sources: list[tuple[str, int]],
pairs: list[tuple[int, list[tuple[str, str]]]],
texts: list[tuple[int, str]],
htmls: list[tuple[int, str, str]],
version: int = 6,
indices: list[EntryIndexTuple] | None = None,
created: dt.datetime | None = None,
) -> None:
self.name = name
self.sources = sources
self.pairs = pairs
self.texts = texts
self.htmls = htmls
self.version = version
self.indices = [] if indices is None else indices
self.created = dt.datetime.now() if created is None else created
@classmethod
def from_path(cls: type[QuickDic], path_str: str) -> QuickDic:
path = pathlib.Path(path_str)
if path.suffix != ".zip":
with open(path, "rb") as fp:
return cls.from_fp(fp)
with zipfile.ZipFile(path, mode="r") as zf:
fname = next(n for n in zf.namelist() if n.endswith(".quickdic"))
with zf.open(fname) as fp:
return cls.from_fp(fp)
@classmethod
def from_fp(cls: type[QuickDic], fp: IO[bytes]) -> QuickDic:
version = read_int(fp)
created = dt.datetime.fromtimestamp(float(read_long(fp)) / 1000.0) # noqa: DTZ006
name = read_string(fp)
sources = read_list(fp, read_entry_source)
pairs = read_list(fp, read_entry_pairs)
texts = read_list(fp, read_entry_text)
htmls = read_list(fp, read_entry_html)
indices = read_list(fp, read_entry_index)
assert read_string(fp) == "END OF DICTIONARY"
return cls(
name,
sources,
pairs,
texts,
htmls,
version=version,
indices=indices,
created=created,
)
def add_index( # noqa: PLR0913
self,
short_name: str,
long_name: str,
iso: str,
normalizer_rules: str,
synonyms: dict | None = None,
) -> None:
swap_flag = False
comparator = Comparator(iso, normalizer_rules, self.version)
synonyms = synonyms or {}
n_synonyms = sum(len(v) for v in synonyms.values())
log.info(f"Adding an index for {iso} with {n_synonyms} synonyms ...")
# since we don't tokenize, the stop list is always empty
stop_list: list[str] = []
if self.indices is None:
self.indices = []
log.info("Initialize token list ...")
tokens1 = [
(pair[1 if swap_flag else 0], 0, idx)
for idx, (_, pairs) in enumerate(self.pairs)
for pair in pairs
]
if not swap_flag:
tokens1.extend(
[(title, 4, idx) for idx, (_, title, _) in enumerate(self.htmls)],
)
tokens1 = [(t.strip(), ttype, tidx) for t, ttype, tidx in tokens1]
log.info("Normalize tokens ...")
tokens = [
(t, comparator.normalize(t), ttype, tidx) for t, ttype, tidx in tokens1 if t
]
if len(synonyms) > 0:
log.info(
f"Insert synonyms into token list ({len(tokens)} entries) ...",
)
tokens.extend(
[
(s, comparator.normalize(s)) + t[2:]
for t in tokens
if t[0] in synonyms
for s in synonyms[t[0]]
if s
],
)
log.info(f"Sort tokens with synonyms ({len(tokens)} entries) ...")
key_fun = functools.cmp_to_key(comparator.compare)
tokens.sort(key=lambda t: key_fun((t[0], t[1])))
log.info("Build mid-layer index ...")
rows: list[tuple[int, int]] = []
index_entries: list[IndexEntryType] = []
for token, token_norm, ttype, tidx in tokens:
prev_token = "" if len(index_entries) == 0 else index_entries[-1][0]
if prev_token == token:
(
token, # noqa: PLW2901
index_start,
count,
token_norm, # noqa: PLW2901
html_indices,
) = index_entries.pop()
else:
i_entry = len(index_entries)
index_start = len(rows)
count = 0
token_norm = "" if token == token_norm else token_norm # noqa: PLW2901
html_indices = []
rows.append((1, i_entry))
if ttype == 4:
if tidx not in html_indices:
html_indices.append(tidx)
elif (ttype, tidx) not in rows[index_start + 1 :]:
rows.append((ttype, tidx))
count += 1
index_entries.append(
(token, index_start, count, token_norm, html_indices),
)
# the exact meaning of this parameter is unknown,
# and it seems to be ignored by readers
main_token_count = len(index_entries)
self.indices.append(
(
short_name,
long_name,
iso,
normalizer_rules,
swap_flag,
main_token_count,
index_entries,
stop_list,
rows,
),
)
def write(self, path: str) -> None:
with open(path, "wb") as fp:
log.info(f"Writing to {path} ...")
write_int(fp, self.version)
write_long(fp, int(self.created.timestamp() * 1000))
write_string(fp, self.name)
write_list(fp, write_entry_source, self.sources)
write_list(fp, write_entry_pairs, self.pairs)
write_list(fp, write_entry_text, self.texts)
write_list(fp, write_entry_html, self.htmls)
write_list(fp, write_entry_index, self.indices)
write_string(fp, "END OF DICTIONARY")
class Reader:
depends = {
"icu": "PyICU",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._dic: QuickDic | None = None
def open(self, filename: str) -> None:
self._filename = filename
self._dic = QuickDic.from_path(self._filename)
self._glos.setDefaultDefiFormat("h")
self._extract_synonyms_from_indices()
def _extract_synonyms_from_indices(self) -> None:
self._text_tokens: dict[int, str] = {}
self._synonyms: dict[tuple[int, int], set[str]] = {}
assert self._dic is not None
for index in self._dic.indices:
_, _, _, _, swap_flag, _, index_entries, _, _ = index
# Note that we ignore swapped indices because pyglossary assumes
# uni-directional dictionaries.
# It might make sense to add an option in the future to read only the
# swapped indices (create a dictionary with reversed direction).
if swap_flag:
continue
for i_entry, index_entry in enumerate(index_entries):
e_rows = self._extract_rows_from_indexentry(index, i_entry)
token, _, _, token_norm, _ = index_entry
for entry_id in e_rows:
if entry_id not in self._synonyms:
self._synonyms[entry_id] = set()
self._synonyms[entry_id].add(token)
if token_norm:
self._synonyms[entry_id].add(token_norm)
def _extract_rows_from_indexentry(
self,
index: EntryIndexTuple,
i_entry: int,
recurse: list[int] | None = None,
) -> list[tuple[int, int]]:
recurse = recurse or []
recurse.append(i_entry)
_, _, _, _, _, _, index_entries, _, rows = index
token, start_index, count, _, html_indices = index_entries[i_entry]
block_rows = rows[start_index : start_index + count + 1]
assert block_rows[0][0] in {1, 3}
assert block_rows[0][1] == i_entry
e_rows = []
for entry_type, entry_idx in block_rows[1:]:
if entry_type in {1, 3}:
# avoid an endless recursion
if entry_idx not in recurse:
e_rows.extend(
self._extract_rows_from_indexentry(
index,
entry_idx,
recurse=recurse,
),
)
else:
e_rows.append((entry_type, entry_idx))
if entry_type == 2 and entry_idx not in self._text_tokens:
self._text_tokens[entry_idx] = token
for idx in html_indices:
if (4, idx) not in e_rows:
e_rows.append((4, idx))
return e_rows
def close(self) -> None:
self.clear()
def clear(self) -> None:
self._filename = ""
self._dic = None
def __len__(self) -> int:
if self._dic is None:
return 0
return sum(len(p) for _, p in self._dic.pairs) + len(self._dic.htmls)
def __iter__(self) -> typing.Iterator[EntryType]:
if self._dic is None:
raise RuntimeError("dictionary not open")
for idx, (_, pairs) in enumerate(self._dic.pairs):
syns = self._synonyms.get((0, idx), set())
for word, defi in pairs:
l_word = [word] + sorted(syns.difference({word}))
yield self._glos.newEntry(l_word, defi, defiFormat="m")
for idx, (_, defi) in enumerate(self._dic.texts):
if idx not in self._text_tokens:
# Ignore this text entry since it is not mentioned in the index at all
# so that we don't even have a token or title for it.
continue
word = self._text_tokens[idx]
syns = self._synonyms.get((2, idx), set())
l_word = [word] + sorted(syns.difference({word}))
yield self._glos.newEntry(l_word, defi, defiFormat="m")
for idx, (_, word, defi) in enumerate(self._dic.htmls):
syns = self._synonyms.get((4, idx), set())
l_word = [word] + sorted(syns.difference({word}))
yield self._glos.newEntry(l_word, defi, defiFormat="h")
class Writer:
_normalizer_rules = ""
_source_lang = ""
_target_lang = ""
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._dic = None
def finish(self) -> None:
self._filename = ""
self._dic = None
def open(self, filename: str) -> None:
self._filename = filename
def write(self) -> typing.Generator[None, EntryType, None]:
synonyms: dict[str, list[str]] = {}
htmls = []
log.info("Converting individual entries ...")
while True:
entry = yield
if entry is None:
break
if entry.isData():
log.warn(f"Ignoring binary data entry {entry.l_word[0]}")
continue
entry.detectDefiFormat()
if entry.defiFormat not in {"h", "m"}:
log.error(f"Unsupported defiFormat={entry.defiFormat}, assuming 'h'")
words = entry.l_word
if words[0] in synonyms:
synonyms[words[0]].extend(words[1:])
else:
synonyms[words[0]] = words[1:]
# Note that we currently write out all entries as "html" type entries.
# In the future, it might make sense to add an option that somehow
# specifies the entry type to use.
htmls.append((0, words[0], entry.defi))
log.info("Collecting meta data ...")
name = self._glos.getInfo("bookname")
if not name:
name = self._glos.getInfo("description")
sourceLang = (
self._glos.sourceLang
if not self._source_lang
else langDict[self._source_lang]
)
targetLang = (
self._glos.targetLang
if not self._target_lang
else langDict[self._target_lang]
)
if sourceLang and targetLang:
sourceLang = sourceLang.code
targetLang = targetLang.code
else:
# fallback if no languages are specified
sourceLang = targetLang = "EN"
langs = f"{sourceLang}->{targetLang}"
if langs not in name.lower():
name = f"{self._glos.getInfo('name')} ({langs})"
sources = [("", len(htmls))]
pairs = []
texts = []
self._dic = QuickDic(name, sources, pairs, texts, htmls)
short_name = long_name = iso = sourceLang
normalizer_rules = (
self._normalizer_rules or default_de_normalizer_rules
if iso == "DE"
else default_normalizer_rules
)
self._dic.add_index(
short_name,
long_name,
iso,
normalizer_rules,
synonyms=synonyms,
)
self._dic.write(self._filename)
| 23,295 | Python | .py | 763 | 27.580603 | 84 | 0.679886 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,816 | xdxf_lax.py | ilius_pyglossary/pyglossary/plugins/xdxf_lax.py |
# -*- coding: utf-8 -*-
#
from __future__ import annotations
"""Lax implementation of xdxf reader."""
#
# Copyright © 2023 Saeed Rasooli
# Copyright © 2016 ivan tkachenko me@ratijas.tk
#
# some parts of this file include code from:
# Aard Dictionary Tools <http://aarddict.org>.
# Copyright © 2008-2009 Igor Tkach
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re
import typing
from collections.abc import Iterator, Sequence
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import io
from lxml.html import HtmlElement as Element
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import log
from pyglossary.io_utils import nullBinaryIO
from pyglossary.option import (
BoolOption,
Option,
)
from pyglossary.text_utils import toStr
from pyglossary.xdxf.transform import XdxfTransformer
from pyglossary.xdxf.xsl_transform import XslXdxfTransformer
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "xdxf_lax"
format = "XdxfLax"
description = "XDXF Lax (.xdxf)"
extensions = ()
extensionCreate = ".xdxf"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/XDXF"
website = (
"https://github.com/soshial/xdxf_makedict/tree/master/format_standard",
"XDXF standard - @soshial/xdxf_makedict",
)
optionsProp: "dict[str, Option]" = {
"html": BoolOption(comment="Entries are HTML"),
"xsl": BoolOption(
comment="Use XSL transformation",
),
}
if TYPE_CHECKING:
class TransformerType(typing.Protocol):
def transform(self, article: Element) -> str: ...
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
_html: bool = True
_xsl: bool = False
infoKeyMap = {
"full_name": "name",
"full_title": "name",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: "io.IOBase" = nullBinaryIO
self._encoding = "utf-8"
self._htmlTr: "TransformerType | None" = None
self._re_span_k = re.compile(
'<span class="k">[^<>]*</span>(<br/>)?',
)
def readUntil(self, untilByte: bytes) -> tuple[int, bytes]:
_file = self._file
buf = b""
while True:
tmp = _file.read(100)
if not tmp:
break
buf += tmp
index = buf.find(untilByte)
if index < 0:
continue
_file.seek(_file.tell() - len(buf) + index)
return index, buf[:index]
return -1, buf
def _readOneMetadata(self, tag: str, infoKey: str) -> None:
from lxml.etree import XML
endTag = f"</{tag}>".encode("ascii")
descStart, _ = self.readUntil(f"<{tag}>".encode("ascii"))
if descStart < 0:
log.warning(f"did not find {tag} open")
return
descEnd, desc = self.readUntil(endTag)
if descEnd < 0:
log.warning(f"did not find {tag} close")
return
desc += endTag
elem = XML(desc)
if elem.text:
self._glos.setInfo(infoKey, elem.text)
def readMetadata(self) -> None:
_file = self._file
pos = _file.tell()
self._readOneMetadata("full_name", "title")
_file.seek(pos)
self._readOneMetadata("description", "description")
def open(self, filename: str) -> None:
# <!DOCTYPE xdxf SYSTEM "http://xdxf.sourceforge.net/xdxf_lousy.dtd">
self._filename = filename
if self._html:
if self._xsl:
self._htmlTr = XslXdxfTransformer(encoding=self._encoding)
else:
self._htmlTr = XdxfTransformer(encoding=self._encoding)
self._glos.setDefaultDefiFormat("h")
else:
self._glos.setDefaultDefiFormat("x")
cfile = self._file = compressionOpen(self._filename, mode="rb")
self.readMetadata()
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
self._glos.setInfo("input_file_size", f"{self._fileSize}")
def __len__(self) -> int:
return 0
def __iter__(self) -> Iterator[EntryType]:
from lxml.html import fromstring, tostring
while True:
start, _ = self.readUntil(b"<ar")
if start < 0:
break
end, b_article = self.readUntil(b"</ar>")
if end < 0:
break
b_article += b"</ar>"
s_article = b_article.decode("utf-8")
try:
article = cast("Element", fromstring(s_article))
except Exception as e:
log.exception(s_article)
raise e from None
words = [toStr(w) for w in self.titles(article)]
if self._htmlTr:
defi = self._htmlTr.transform(article)
defiFormat = "h"
if len(words) == 1:
defi = self._re_span_k.sub("", defi)
else:
b_defi = cast(bytes, tostring(article, encoding=self._encoding))
defi = b_defi[4:-5].decode(self._encoding).strip()
defiFormat = "x"
# log.info(f"{defi=}, {words=}")
yield self._glos.newEntry(
words,
defi,
defiFormat=defiFormat,
byteProgress=(self._file.tell(), self._fileSize),
)
def close(self) -> None:
if self._file:
self._file.close()
self._file = nullBinaryIO
@staticmethod
def tostring(
elem: Element,
) -> str:
from lxml.html import tostring
return (
tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
def titles(self, article: Element) -> list[str]:
"""
:param article: <ar> tag
:return: (title (str) | None, alternative titles (set))
"""
from itertools import combinations
titles: list[str] = []
for title_element in article.findall("k"):
if title_element.text is None:
# TODO: look for <opt> tag?
log.warning(f"empty title element: {self.tostring(title_element)}")
continue
n_opts = len([c for c in title_element if c.tag == "opt"])
if n_opts:
titles += [
self._mktitle(title_element, comb)
for j in range(n_opts + 1)
for comb in combinations(list(range(n_opts)), j)
]
else:
titles.append(self._mktitle(title_element))
return titles
def _mktitle( # noqa: PLR6301
self,
title_element: Element,
include_opts: "Sequence | None" = None,
) -> str:
if include_opts is None:
include_opts = ()
title = title_element.text or ""
opt_i = -1
for c in title_element:
if c.tag == "nu" and c.tail:
if title:
title += c.tail
else:
title = c.tail
if c.tag == "opt" and c.text is not None:
opt_i += 1
if opt_i in include_opts:
title += c.text
if c.tail:
title += c.tail
return title.strip()
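# Illustrative sketch (not part of the original file): the _re_span_k pattern
# used in __iter__ strips the repeated headword block that the XDXF
# transformer places at the top of a single-headword article.
def _demo_strip_headword_span() -> str:
	defi = '<span class="k">apple</span><br/>a round fruit'
	# yields "a round fruit"
	return re.sub('<span class="k">[^<>]*</span>(<br/>)?', "", defi)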
| 6,906 | Python | .py | 249 | 24.654618 | 72 | 0.688832 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,817 | csv_plugin.py | ilius_pyglossary/pyglossary/plugins/csv_plugin.py |
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2019 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# This file is part of PyGlossary project, https://github.com/ilius/pyglossary
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
import csv
import os
from collections.abc import Generator, Iterable, Iterator
from os.path import isdir, join
from typing import TYPE_CHECKING, cast
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import log
from pyglossary.io_utils import nullTextIO
from pyglossary.option import (
BoolOption,
EncodingOption,
NewlineOption,
Option,
)
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "csv"
format = "Csv"
description = "CSV (.csv)"
extensions = (".csv",)
extensionCreate = ".csv"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/Comma-separated_values"
website = None
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"newline": NewlineOption(),
"resources": BoolOption(
comment="Enable resources / data files",
),
"delimiter": Option(
typ="str",
customValue=True,
values=[",", ";", "@"],
comment="Column delimiter",
),
"add_defi_format": BoolOption(
comment="enable adding defiFormat (m/h/x)",
),
"enable_info": BoolOption(
comment="Enable glossary info / metedata",
),
"word_title": BoolOption(
comment="add headwords title to beginning of definition",
),
}
csv.field_size_limit(0x7FFFFFFF)
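# Illustrative sketch (not part of the original file): the column layout this
# plugin reads and writes is headword, definition, and optionally a
# comma-joined list of alternate headwords.
def _demo_csv_row() -> str:
	from io import StringIO
	buf = StringIO()
	csv.writer(buf, dialect="excel", quoting=csv.QUOTE_ALL).writerow(
		["headword", "<b>definition</b>", "alt1,alt2"],
	)
	return buf.getvalue()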
class Reader:
compressions = stdCompressions
_encoding: str = "utf-8"
_newline: str = "\n"
_delimiter: str = ","
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self.clear()
def clear(self) -> None:
self._filename = ""
self._file: "io.TextIOBase" = nullTextIO
self._fileSize = 0
self._leadingLinesCount = 0
self._wordCount: "int | None" = None
self._pos = -1
self._csvReader: "Iterable[list[str]] | None" = None
self._resDir = ""
self._resFileNames: list[str] = []
self._bufferRow: "list[str] | None" = None
def open(
self,
filename: str,
) -> None:
from pyglossary.text_reader import TextFilePosWrapper
self._filename = filename
cfile = cast(
"io.TextIOBase",
compressionOpen(
filename,
mode="rt",
encoding=self._encoding,
newline=self._newline,
),
)
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
# self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("CSV Reader: file is not seekable")
self._file = TextFilePosWrapper(cfile, self._encoding)
self._csvReader = csv.reader(
self._file,
dialect="excel",
delimiter=self._delimiter,
)
self._resDir = filename + "_res"
if isdir(self._resDir):
self._resFileNames = os.listdir(self._resDir)
else:
self._resDir = ""
self._resFileNames = []
for row in self._csvReader:
if not row:
continue
if not row[0].startswith("#"):
self._bufferRow = row
break
if len(row) < 2:
log.error(f"invalid row: {row}")
continue
self._glos.setInfo(row[0].lstrip("#"), row[1])
def close(self) -> None:
if self._file:
try:
self._file.close()
except Exception:
log.exception("error while closing csv file")
self.clear()
def __len__(self) -> int:
from pyglossary.file_utils import fileCountLines
if self._wordCount is None:
if hasattr(self._file, "compression"):
return 0
log.debug("Try not to use len(reader) as it takes extra time")
self._wordCount = fileCountLines(self._filename) - self._leadingLinesCount
return self._wordCount + len(self._resFileNames)
def _iterRows(self) -> Iterator[list[str]]:
if self._csvReader is None:
raise RuntimeError("self._csvReader is None")
if self._bufferRow:
yield self._bufferRow
yield from self._csvReader
def _processRow(self, row: list[str]) -> EntryType | None:
if not row:
return None
word: "str | list[str]"
try:
word = row[0]
defi = row[1]
except IndexError:
log.error(f"invalid row: {row!r}")
return None
try:
alts = row[2].split(",")
except IndexError:
pass
else:
word = [word] + alts
return self._glos.newEntry(
word,
defi,
byteProgress=(
(self._file.tell(), self._fileSize) if self._fileSize else None
),
)
def __iter__(self) -> Iterator[EntryType | None]:
if not self._csvReader:
raise RuntimeError("iterating over a reader while it's not open")
wordCount = 0
for row in self._iterRows():
wordCount += 1
yield self._processRow(row)
self._wordCount = wordCount
resDir = self._resDir
for fname in self._resFileNames:
with open(join(resDir, fname), "rb") as _file:
yield self._glos.newDataEntry(
fname,
_file.read(),
)
class Writer:
compressions = stdCompressions
_encoding: str = "utf-8"
_newline: str = "\n"
_resources: bool = True
_delimiter: str = ","
_add_defi_format: bool = False
_enable_info: bool = True
_word_title: bool = False
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._file: "io.TextIOBase" = nullTextIO
def open(self, filename: str) -> None:
self._filename = filename
self._file = cast(
"io.TextIOBase",
compressionOpen(
filename,
mode="wt",
encoding=self._encoding,
newline=self._newline,
),
)
self._resDir = resDir = filename + "_res"
self._csvWriter = csv.writer(
self._file,
dialect="excel",
quoting=csv.QUOTE_ALL, # FIXME
delimiter=self._delimiter,
)
if not isdir(resDir):
os.mkdir(resDir)
if self._enable_info:
for key, value in self._glos.iterInfo():
self._csvWriter.writerow([f"#{key}", value])
def finish(self) -> None:
self._filename = ""
self._file.close()
self._file = nullTextIO
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
def write(self) -> Generator[None, EntryType, None]:
resources = self._resources
add_defi_format = self._add_defi_format
glos = self._glos
resDir = self._resDir
writer = self._csvWriter
word_title = self._word_title
while True:
entry = yield
if entry is None:
break
if entry.isData():
if resources:
entry.save(resDir)
continue
words = entry.l_word
if not words:
continue
word, alts = words[0], words[1:]
defi = entry.defi
if word_title:
defi = glos.wordTitleStr(words[0]) + defi
row = [
word,
defi,
]
if add_defi_format:
entry.detectDefiFormat()
row.append(entry.defiFormat)
if alts:
row.append(",".join(alts))
writer.writerow(row)
| 7,457 | Python | .py | 282 | 23.37234 | 78 | 0.694281 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,818 | wordnet.py | ilius_pyglossary/pyglossary/plugins/wordnet.py |
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License <http://www.gnu.org/licenses/gpl-3.0.txt>
# for more details.
#
# Copyright (C) 2023 Saeed Rasooli
# Copyright (C) 2015 Igor Tkach
#
# This plugin is based on https://github.com/itkach/wordnet2slob
from __future__ import annotations
import io
import os
import re
import sys
from collections import defaultdict
from collections.abc import Iterator
from typing import TYPE_CHECKING
from pyglossary.core import log
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "wordnet"
format = "Wordnet"
description = "WordNet"
extensions = ()
extensionCreate = ""
singleFile = False
kind = "directory"
wiki = "https://en.wikipedia.org/wiki/WordNet"
website = (
"https://wordnet.princeton.edu/",
"WordNet - A Lexical Database for English",
)
# key is option/argument name, value is instance of Option
optionsProp: "dict[str, Option]" = {}
# original expression from
# http://stackoverflow.com/questions/694344/regular-expression-that-matches-between-quotes-containing-escaped-quotes
# "(?:[^\\"]+|\\.)*"
# some examples don't have a closing quote, which
# makes the subn with this expression hang
# quotedTextPattern = re.compile(r'"(?:[^"]+|\.)*["|\n]')
# make it a capturing group so that we can get rid of quotes
quotedTextPattern = re.compile(r'"([^"]+)"')
refPattern = re.compile(r"`(\w+)'")
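# Illustrative sketch (not part of the original plugin): how the two patterns
# above rewrite a gloss in prepare(); the sample gloss is made up.
def _demo_gloss_markup() -> str:
	gloss = "that which exists; \"an example sentence\" (compare `entity')"
	out, _ = quotedTextPattern.subn(
		lambda m: f'<cite class="ex">{m.group(1)}</cite>',
		gloss,
	)
	out, _ = refPattern.subn(
		lambda m: f'<a href="{m.group(1)}">{m.group(1)}</a>',
		out,
	)
	return out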
class SynSet:
def __init__(self, line: str | bytes) -> None:
self.line = line
if isinstance(line, bytes):
line = line.decode("utf-8")
meta, self.gloss = line.split("|")
self.meta_parts = meta.split()
@property
def offset(self) -> int:
return int(self.meta_parts[0])
@property
def lex_filenum(self) -> str:
return self.meta_parts[1]
@property
def ss_type(self) -> str:
return self.meta_parts[2]
@property
def w_cnt(self) -> int:
return int(self.meta_parts[3], 16)
@property
def words(self) -> list[str]:
return [self.meta_parts[4 + 2 * i].replace("_", " ") for i in range(self.w_cnt)]
@property
def pointers(self) -> list[Pointer]:
p_cnt_index = 4 + 2 * self.w_cnt
p_cnt = self.meta_parts[p_cnt_index]
pointer_count = int(p_cnt)
start = p_cnt_index + 1
return [
Pointer(*self.meta_parts[start + i * 4 : start + (i + 1) * 4]) # type: ignore
for i in range(pointer_count)
]
def __repr__(self) -> str:
return f"SynSet({self.line!r})"
class PointerSymbols:
n = {
"!": "Antonyms",
"@": "Hypernyms",
"@i": "Instance hypernyms",
"~": "Hyponyms",
"~i": "Instance hyponyms",
"#m": "Member holonyms",
"#s": "Substance holonyms",
"#p": "Part holonyms",
"%m": "Member meronyms",
"%s": "Substance meronyms",
"%p": "Part meronyms",
"=": "Attributes",
"+": "Derivationally related forms",
";c": "Domain of synset - TOPIC",
"-c": "Member of this domain - TOPIC",
";r": "Domain of synset - REGION",
"-r": "Member of this domain - REGION",
";u": "Domain of synset - USAGE",
"-u": "Member of this domain - USAGE",
"^": "Also see",
}
v = {
"!": "Antonyms",
"@": "Hypernyms",
"~": "Hyponyms",
"*": "Entailments",
">": "Cause",
"^": "Also see",
"$": "Verb group",
"+": "Derivationally related forms",
";c": "Domain of synset - TOPIC",
";r": "Domain of synset - REGION",
";u": "Domain of synset - USAGE",
}
a = s = {
"!": "Antonyms",
"+": "Derivationally related forms",
"&": "Similar to",
"<": "Participle of verb",
"\\": "Pertainyms",
"=": "Attributes",
"^": "Also see",
";c": "Domain of synset - TOPIC",
";r": "Domain of synset - REGION",
";u": "Domain of synset - USAGE",
}
r = {
"!": "Antonyms",
"\\": "Derived from adjective",
"+": "Derivationally related forms",
";c": "Domain of synset - TOPIC",
";r": "Domain of synset - REGION",
";u": "Domain of synset - USAGE",
"^": "Also see",
}
class Pointer:
def __init__(self, symbol: str, offset: str, pos: str, source_target: str) -> None:
self.symbol = symbol
self.offset = int(offset)
self.pos = pos
self.source_target = source_target
self.source = int(source_target[:2], 16)
self.target = int(source_target[2:], 16)
def __repr__(self) -> str:
return (
f"Pointer({self.symbol!r}, {self.offset!r}, "
f"{self.pos!r}, {self.source_target!r})"
)
class WordNet:
article_template = "<h1>%s</h1><span>%s</span>"
synSetTypes = {
"n": "n.",
"v": "v.",
"a": "adj.",
"s": "adj. satellite",
"r": "adv.",
}
file2pos = {
"data.adj": ["a", "s"],
"data.adv": ["r"],
"data.noun": ["n"],
"data.verb": ["v"],
}
def __init__(self, wordnetdir: str) -> None:
self.wordnetdir = wordnetdir
self.collector: "dict[str, list[str]]" = defaultdict(list)
@staticmethod
def iterlines(dict_dir: str) -> Iterator[str]:
for name in os.listdir(dict_dir):
if not name.startswith("data."):
continue
with open(os.path.join(dict_dir, name), encoding="utf-8") as f:
for line in f:
if not line.startswith(" "):
yield line
# PLR0912 Too many branches (16 > 12)
def prepare(self) -> None: # noqa: PLR0912
synSetTypes = self.synSetTypes
file2pos = self.file2pos
dict_dir = self.wordnetdir
files: dict[str, io.TextIOWrapper] = {}
for name in os.listdir(dict_dir):
if name.startswith("data.") and name in file2pos:
f = open(os.path.join(dict_dir, name), encoding="utf-8") # noqa: SIM115
for key in file2pos[name]:
files[key] = f
def a(word: str) -> str:
return f'<a href="{word}">{word}</a>'
for index, line in enumerate(self.iterlines(dict_dir)):
if index % 100 == 0 and index > 0:
sys.stdout.write(".")
sys.stdout.flush()
if index % 5000 == 0 and index > 0:
sys.stdout.write("\n")
sys.stdout.flush()
if not line or not line.strip():
continue
synset = SynSet(line)
gloss_with_examples, _ = quotedTextPattern.subn(
lambda x: f'<cite class="ex">{x.group(1)}</cite>',
synset.gloss,
)
gloss_with_examples, _ = refPattern.subn(
lambda x: a(x.group(1)),
gloss_with_examples,
)
words = synset.words
for index2, word in enumerate(words):
# TODO: move this block to a func
synonyms = ", ".join(a(w) for w in words if w != word)
synonyms_str = (
f'<br/><small class="co">Synonyms:</small> {synonyms}'
if synonyms
else ""
)
pointers = defaultdict(list)
for pointer in synset.pointers:
if (
pointer.source
and pointer.target
and pointer.source - 1 != index2
):
continue
symbol = pointer.symbol
if symbol and symbol[:1] in {";", "-"}:
continue
try:
symbol_desc = getattr(PointerSymbols, synset.ss_type)[symbol]
except KeyError:
log.warning(
f"unknown pointer symbol {symbol} for {synset.ss_type} ",
)
symbol_desc = symbol
data_file = files[pointer.pos]
data_file.seek(pointer.offset)
referenced_synset = SynSet(data_file.readline())
if pointer.source == pointer.target == 0:
pointers[symbol_desc] = [
w for w in referenced_synset.words if w not in words
]
else:
referenced_word = referenced_synset.words[pointer.target - 1]
if referenced_word not in pointers[symbol_desc]:
pointers[symbol_desc].append(referenced_word)
pointers_str = "".join(
[
f'<br/><small class="co">{symbol_desc}:</small> '
+ ", ".join(a(w) for w in referenced_words)
for symbol_desc, referenced_words in pointers.items()
if referenced_words
],
)
self.collector[word].append(
f'<i class="pos grammar">{synSetTypes[synset.ss_type]}</i>'
f" {gloss_with_examples}{synonyms_str}{pointers_str}",
)
sys.stdout.write("\n")
sys.stdout.flush()
def process(self) -> Iterator[tuple[str, str]]:
article_template = self.article_template
for title in self.collector:
article_pieces = self.collector[title]
article_pieces_count = len(article_pieces)
text = None
if article_pieces_count > 1:
ol = ["<ol>"] + [f"<li>{ap}</li>" for ap in article_pieces] + ["</ol>"]
text = article_template % (title, "".join(ol))
elif article_pieces_count == 1:
text = article_template % (title, article_pieces[0])
if text:
yield title, text
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._wordCount = 0
self.wordnet: "WordNet | None" = None
def __len__(self) -> int:
return self._wordCount
def open(self, filename: str) -> None:
self.wordnet = WordNet(filename)
log.info("Running wordnet.prepare()")
self.wordnet.prepare()
# TODO: metadata
def close(self) -> None:
self.wordnet = None
def __iter__(self) -> Iterator[EntryType]:
if self.wordnet is None:
raise ValueError("self.wordnet is None")
glos = self._glos
for word, defi in self.wordnet.process():
yield glos.newEntry(word, defi)
| 9,455 | Python | .py | 313 | 26.846645 | 116 | 0.65105 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,819 | aard2_slob.py | ilius_pyglossary/pyglossary/plugins/aard2_slob.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import re
import shutil
from os.path import isfile, splitext
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Generator, Iterator
from pyglossary import slob
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.core import cacheDir, exc_note, log, pip
from pyglossary.option import (
BoolOption,
FileSizeOption,
IntOption,
Option,
StrOption,
)
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "aard2_slob"
format = "Aard2Slob"
description = "Aard 2 (.slob)"
extensions = (".slob",)
extensionCreate = ".slob"
singleFile = True
kind = "binary"
wiki = "https://github.com/itkach/slob/wiki"
website = (
"http://aarddict.org/",
"aarddict.org",
)
optionsProp: "dict[str, Option]" = {
"compression": StrOption(
values=["", "bz2", "zlib", "lzma2"],
comment="Compression Algorithm",
),
"content_type": StrOption(
customValue=True,
values=[
"text/plain; charset=utf-8",
"text/html; charset=utf-8",
],
comment="Content Type",
),
# "encoding": EncodingOption(),
"file_size_approx": FileSizeOption(
comment="split up by given approximate file size\nexamples: 100m, 1g",
),
"file_size_approx_check_num_entries": IntOption(
comment="for file_size_approx, check every `[?]` entries",
),
"separate_alternates": BoolOption(
comment="add alternate headwords as separate entries to slob",
),
"word_title": BoolOption(
comment="add headwords title to beginning of definition",
),
"version_info": BoolOption(
comment="add version info tags to slob file",
),
"audio_goldendict": BoolOption(
comment="Convert audio links for GoldenDict (desktop)",
),
}
extraDocs = [
(
"PyICU",
"See [doc/pyicu.md](./doc/pyicu.md) file for more detailed"
" instructions on how to install PyICU.",
),
]
t_created_at = "created.at"
t_label = "label"
t_created_by = "created.by"
t_copyright = "copyright"
t_license_name = "license.name"
t_license_url = "license.url"
t_uri = "uri"
t_edition = "edition"
supported_tags = {
t_label,
t_created_at,
t_created_by,
t_copyright,
t_uri,
t_edition,
}
class Reader:
depends = {
"icu": "PyICU", # >=1.5
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
self._re_bword = re.compile(
"(<a href=[^<>]+?>)",
re.IGNORECASE,
)
def close(self) -> None:
if self._slobObj is not None:
self._slobObj.close()
self._clear()
def _clear(self) -> None:
self._filename = ""
self._slobObj: "slob.Slob | None" = None
# TODO: PLR0912 Too many branches (13 > 12)
def open(self, filename: str) -> None: # noqa: PLR0912
try:
import icu # type: ignore # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install PyICU` to install")
raise
from pyglossary import slob
self._filename = filename
self._slobObj = slob.open(filename)
tags = dict(self._slobObj.tags.items())
if t_label in tags:
self._glos.setInfo("name", tags[t_label])
if t_created_at in tags:
self._glos.setInfo("creationTime", tags[t_created_at])
if t_created_by in tags:
self._glos.setInfo("author", tags[t_created_by])
copyrightLines = []
for key in (t_copyright, t_license_name, t_license_url):
try:
value = tags.pop(key)
except KeyError:
continue
copyrightLines.append(value)
if copyrightLines:
self._glos.setInfo("copyright", "\n".join(copyrightLines))
if t_uri in tags:
self._glos.setInfo("website", tags[t_uri])
if t_edition in tags:
self._glos.setInfo("edition", tags[t_edition])
for key, value in tags.items():
if key in supported_tags:
continue
self._glos.setInfo(f"slob.{key}", value)
def __len__(self) -> int:
if self._slobObj is None:
log.error("called len() on a reader which is not open")
return 0
return len(self._slobObj)
@staticmethod
def _href_sub(m: "re.Match") -> str:
st = m.group(0)
if "//" in st:
return st
return st.replace('href="', 'href="bword://').replace(
"href='",
"href='bword://",
)
def __iter__(self) -> Iterator[EntryType | None]:
from pyglossary.slob import MIME_HTML, MIME_TEXT
if self._slobObj is None:
raise RuntimeError("iterating over a reader while it's not open")
slobObj = self._slobObj
blobSet = set()
		# the slob library yields duplicate blobs when iterating over slobObj.
		# Even keeping the last id is not enough, since duplicate blobs
		# are not all consecutive, so we have to keep a set of blob IDs
for blob in slobObj:
_id = blob.identity
if _id in blobSet:
yield None # update progressbar
continue
blobSet.add(_id)
# blob.key is str, blob.content is bytes
word = blob.key
ctype = blob.content_type.split(";")[0]
if ctype not in {MIME_HTML, MIME_TEXT}:
log.debug(f"unknown {blob.content_type=} in {word=}")
word = word.removeprefix("~/")
yield self._glos.newDataEntry(word, blob.content)
continue
defiFormat = ""
if ctype == MIME_HTML:
defiFormat = "h"
elif ctype == MIME_TEXT:
defiFormat = "m"
defi = blob.content.decode("utf-8")
defi = self._re_bword.sub(self._href_sub, defi)
yield self._glos.newEntry(word, defi, defiFormat=defiFormat)
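# Illustrative sketch (not part of the original file): how Reader rewrites
# internal links to bword:// URLs while leaving absolute URLs untouched.
# The sample HTML snippet is made up.
def _demo_href_rewrite() -> str:
	pattern = re.compile("(<a href=[^<>]+?>)", re.IGNORECASE)
	defi = '<a href="apple">apple</a> and <a href="https://example.com">site</a>'
	# the first link becomes href="bword://apple"; the absolute URL is unchanged
	return pattern.sub(Reader._href_sub, defi)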
class Writer:
depends = {
"icu": "PyICU",
}
_compression: str = "zlib"
_content_type: str = ""
_file_size_approx: int = 0
_file_size_approx_check_num_entries = 100
_separate_alternates: bool = False
_word_title: bool = False
_version_info: bool = False
_audio_goldendict: bool = False
resourceMimeTypes = {
"png": "image/png",
"jpeg": "image/jpeg",
"jpg": "image/jpeg",
"gif": "image/gif",
"svg": "image/svg+xml",
"webp": "image/webp",
"tiff": "image/tiff",
"tif": "image/tiff",
"bmp": "image/bmp",
"css": "text/css",
"js": "application/javascript",
"json": "application/json",
"woff": "application/font-woff",
"woff2": "application/font-woff2",
"ttf": "application/x-font-ttf",
"otf": "application/x-font-opentype",
"mp3": "audio/mpeg",
"ogg": "audio/ogg",
"spx": "audio/x-speex",
"wav": "audio/wav",
"ini": "text/plain",
# "application/octet-stream+xapian",
"eot": "application/vnd.ms-fontobject",
"pdf": "application/pdf",
"mp4": "video/mp4",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._resPrefix = ""
self._slobWriter: "slob.Writer | None" = None
@staticmethod
def _slobObserver(
event: "slob.WriterEvent", # noqa: F401, F821
) -> None:
log.debug(f"slob: {event.name}{': ' + event.data if event.data else ''}")
def _open(self, filename: str, namePostfix: str) -> slob.Writer:
from pyglossary import slob
if isfile(filename):
shutil.move(filename, f"{filename}.bak")
log.warning(f"renamed existing {filename!r} to {filename + '.bak'!r}")
self._slobWriter = slobWriter = slob.Writer(
filename,
observer=self._slobObserver,
workdir=cacheDir,
compression=self._compression,
version_info=self._version_info,
)
slobWriter.tag("label", self._glos.getInfo("name") + namePostfix)
return slobWriter
def open(self, filename: str) -> None:
try:
import icu # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install PyICU` to install")
raise
if isfile(filename):
raise OSError(f"File '{filename}' already exists")
namePostfix = ""
if self._file_size_approx > 0:
namePostfix = " (part 1)"
self._open(filename, namePostfix)
self._filename = filename
def finish(self) -> None:
from time import perf_counter
self._filename = ""
if self._slobWriter is None:
return
log.info("Finalizing slob file...")
t0 = perf_counter()
self._slobWriter.finalize()
log.info(f"Finalizing slob file took {perf_counter() - t0:.1f} seconds")
self._slobWriter = None
def addDataEntry(self, entry: EntryType) -> None:
slobWriter = self._slobWriter
if slobWriter is None:
raise ValueError("slobWriter is None")
rel_path = entry.s_word
_, ext = splitext(rel_path)
ext = ext.lstrip(os.path.extsep).lower()
content_type = self.resourceMimeTypes.get(ext)
if not content_type:
log.error(f"Aard2 slob: unknown content type for {rel_path!r}")
return
content = entry.data
key = self._resPrefix + rel_path
try:
key.encode(slobWriter.encoding)
except UnicodeEncodeError:
log.error(f"Failed to add, broken unicode in key: {key!a}")
return
slobWriter.add(content, key, content_type=content_type)
def addEntry(self, entry: EntryType) -> None:
words = entry.l_word
b_defi = entry.defi.encode("utf-8")
_ctype = self._content_type
writer = self._slobWriter
if writer is None:
raise ValueError("slobWriter is None")
entry.detectDefiFormat()
defiFormat = entry.defiFormat
if self._word_title and defiFormat in {"h", "m"}:
if defiFormat == "m":
defiFormat = "h"
title = self._glos.wordTitleStr(
words[0],
)
b_defi = title.encode("utf-8") + b_defi
if defiFormat == "h":
b_defi = b_defi.replace(b'"bword://', b'"')
b_defi = b_defi.replace(b"'bword://", b"'")
if not self._audio_goldendict:
b_defi = b_defi.replace(
b"""href="sound://""",
b'''onclick="new Audio(this.href).play(); return false;" href="''',
)
b_defi = b_defi.replace(
b"""href='sound://""",
b"""onclick="new Audio(this.href).play(); return false;" href='""",
)
b_defi = b_defi.replace(b"""<img src="/""", b'''<img src="''')
b_defi = b_defi.replace(b"""<img src='""", b"""<img src='""")
b_defi = b_defi.replace(b"""<img src="file:///""", b'''<img src="''')
b_defi = b_defi.replace(b"""<img src='file:///""", b"""<img src='""")
if not _ctype:
if defiFormat == "h":
_ctype = "text/html; charset=utf-8"
elif defiFormat == "m":
_ctype = "text/plain; charset=utf-8"
else:
_ctype = "text/plain; charset=utf-8"
if not self._separate_alternates:
writer.add(
b_defi,
*tuple(words),
content_type=_ctype,
)
return
headword, *alts = words
writer.add(
b_defi,
headword,
content_type=_ctype,
)
for alt in alts:
writer.add(
b_defi,
f"{alt}, {headword}",
content_type=_ctype,
)
def write(self) -> Generator[None, EntryType, None]:
slobWriter = self._slobWriter
if slobWriter is None:
raise ValueError("slobWriter is None")
file_size_approx = int(self._file_size_approx * 0.95)
entryCount = 0
sumBlobSize = 0
fileIndex = 0
filenameNoExt, _ = splitext(self._filename)
while True:
entry = yield
if entry is None:
break
if entry.isData():
self.addDataEntry(entry)
else:
self.addEntry(entry)
if file_size_approx <= 0:
continue
# handle file_size_approx
check_every = self._file_size_approx_check_num_entries
entryCount += 1
if entryCount % check_every == 0:
sumBlobSize = slobWriter.size_data()
if sumBlobSize >= file_size_approx:
slobWriter.finalize()
fileIndex += 1
slobWriter = self._open(
f"{filenameNoExt}.{fileIndex}.slob",
f" (part {fileIndex + 1})",
)
sumBlobSize = 0
entryCount = 0
| 11,269 | Python | .py | 396 | 25.222222 | 75 | 0.672215 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,820 | dict_cc.py | ilius_pyglossary/pyglossary/plugins/dict_cc.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import html
from collections.abc import Callable, Iterator
from operator import itemgetter
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import sqlite3
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element, T_htmlfile
from pyglossary.option import Option
from pyglossary.core import log
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dict_cc"
format = "Dictcc"
description = "Dict.cc (SQLite3)"
extensions = ()
extensionCreate = ".db"
singleFile = True
kind = "binary"
wiki = "https://en.wikipedia.org/wiki/Dict.cc"
website = (
"https://play.google.com/store/apps/details?id=cc.dict.dictcc",
"dict.cc dictionary - Google Play",
)
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
self._con = connect(filename)
self._cur = self._con.cursor()
self._glos.setDefaultDefiFormat("h")
def __len__(self) -> int:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute(
"select count(distinct term1)+count(distinct term2) from main_ft",
)
return self._cur.fetchone()[0]
@staticmethod
def makeList(
hf: "T_htmlfile",
input_elements: list[Element],
processor: Callable,
single_prefix: str = "",
skip_single: bool = True,
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not input_elements:
return
if skip_single and len(input_elements) == 1:
hf.write(single_prefix)
processor(hf, input_elements[0])
return
with hf.element("ol"):
for el in input_elements:
with hf.element("li"):
processor(hf, el)
@staticmethod
def makeGroupsList(
hf: "T_htmlfile",
groups: "list[tuple[str, str]]",
processor: "Callable[[T_htmlfile, tuple[str, str]], None]",
single_prefix: str = "",
skip_single: bool = True,
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not groups:
return
if skip_single and len(groups) == 1:
hf.write(single_prefix)
processor(hf, groups[0])
return
with hf.element("ol"):
for el in groups:
with hf.element("li"):
processor(hf, el)
def writeSense( # noqa: PLR6301
self,
hf: "T_htmlfile",
row: "tuple[str, str]",
) -> None:
from lxml import etree as ET
trans, entry_type = row
if entry_type:
with hf.element("i"):
hf.write(f"{entry_type}")
hf.write(ET.Element("br"))
try:
hf.write(trans + " ")
except Exception as e:
log.error(f"error in writing {trans!r}, {e}")
hf.write(repr(trans) + " ")
else:
with hf.element("big"):
with hf.element("a", href=f"bword://{trans}"):
					hf.write("🔗")
def iterRows(
self,
column1: str,
column2: str,
) -> Iterator[tuple[str, str, str]]:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute(
f"select {column1}, {column2}, entry_type from main_ft"
f" order by {column1}",
)
for row in self._cur.fetchall():
term1 = row[0]
term2 = row[1]
try:
term1 = html.unescape(term1)
except Exception as e:
log.error(f"html.unescape({term1!r}) -> {e}")
try:
term2 = html.unescape(term2)
except Exception as e:
log.error(f"html.unescape({term2!r}) -> {e}")
yield term1, term2, row[2]
def parseGender(self, headword: str) -> tuple[str | None, str]: # noqa: PLR6301
# {m} masc masculine German: maskulin
# {f} fem feminine German: feminin
# {n} neut neutral German: neutral
# { } ????
i = headword.find(" {")
if i <= 0:
return None, headword
if len(headword) < i + 4:
return None, headword
if headword[i + 3] != "}":
return None, headword
g = headword[i + 2]
gender = None
if g == "m":
gender = "masculine"
elif g == "f":
gender = "feminine"
elif g == "n":
gender = "neutral"
else:
log.warning(f"invalid gender {g!r}")
return None, headword
headword = headword[:i] + headword[i + 4 :]
return gender, headword
def _iterOneDirection(
self,
column1: str,
column2: str,
) -> Iterator[EntryType]:
from io import BytesIO
from itertools import groupby
from lxml import etree as ET
glos = self._glos
for headwordEscaped, groupsOrig in groupby(
self.iterRows(column1, column2),
key=itemgetter(0),
):
headword = html.unescape(headwordEscaped)
groups: "list[tuple[str, str]]" = [
(term2, entry_type) for _, term2, entry_type in groupsOrig
]
f = BytesIO()
gender, headword = self.parseGender(headword)
with ET.htmlfile(f, encoding="utf-8") as hf:
with hf.element("div"):
if gender:
with hf.element("i"):
hf.write(gender)
hf.write(ET.Element("br"))
self.makeGroupsList(
cast("T_htmlfile", hf),
groups,
self.writeSense,
)
defi = f.getvalue().decode("utf-8")
yield glos.newEntry(headword, defi, defiFormat="h")
def __iter__(self) -> Iterator[EntryType]:
yield from self._iterOneDirection("term1", "term2")
yield from self._iterOneDirection("term2", "term1")
def close(self) -> None:
if self._cur:
self._cur.close()
if self._con:
self._con.close()
self._clear()
| 5,567 | Python | .py | 208 | 23.495192 | 81 | 0.66798 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,821 | dict_org.py | ilius_pyglossary/pyglossary/plugins/dict_org.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import re
from collections.abc import Generator, Iterator
from os.path import isdir, splitext
from typing import TYPE_CHECKING
from pyglossary.core import log
from pyglossary.flags import DEFAULT_NO
from pyglossary.option import BoolOption, Option
from pyglossary.plugin_lib.dictdlib import DictDB
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dict_org"
format = "DictOrg"
description = "DICT.org file format (.index)"
extensions = (".index",)
extensionCreate = ""
singleFile = False
optionsProp: "dict[str, Option]" = {
"dictzip": BoolOption(comment="Compress .dict file to .dict.dz"),
"install": BoolOption(comment="Install dictionary to /usr/share/dictd/"),
}
sortOnWrite = DEFAULT_NO
kind = "directory"
wiki = "https://en.wikipedia.org/wiki/DICT#DICT_file_format"
website = (
"http://dict.org/bin/Dict",
"The DICT Development Group",
)
def installToDictd(filename: str, dictzip: bool) -> None:
"""Filename is without extension (neither .index or .dict or .dict.dz)."""
import shutil
import subprocess
targetDir = "/usr/share/dictd/"
if filename.startswith(targetDir):
return
if not isdir(targetDir):
log.warning(f"Directory {targetDir!r} does not exist, skipping install")
return
log.info(f"Installing {filename!r} to DICTD server directory: {targetDir}")
if dictzip and os.path.isfile(filename + ".dict.dz"):
dictExt = ".dict.dz"
elif os.path.isfile(filename + ".dict"):
dictExt = ".dict"
else:
log.error(f"No .dict file, could not install dictd file {filename!r}")
return
if not filename.startswith(targetDir):
shutil.copy(filename + ".index", targetDir)
shutil.copy(filename + dictExt, targetDir)
# update /var/lib/dictd/db.list
if subprocess.call(["/usr/sbin/dictdconfig", "-w"]) != 0:
log.error(
"failed to update /var/lib/dictd/db.list file"
", try manually running: sudo /usr/sbin/dictdconfig -w",
)
log.info("don't forget to restart dictd server")
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._dictdb: "DictDB | None" = None
# regular expression patterns used to prettify definition text
self._re_newline_in_braces = re.compile(
r"\{(?P<left>.*?)\n(?P<right>.*?)?\}",
)
self._re_words_in_braces = re.compile(
r"\{(?P<word>.+?)\}",
)
def open(self, filename: str) -> None:
filename = filename.removesuffix(".index")
self._filename = filename
self._dictdb = DictDB(filename, "read", 1)
def close(self) -> None:
if self._dictdb is not None:
self._dictdb.close()
# self._dictdb.finish()
self._dictdb = None
def prettifyDefinitionText(self, defi: str) -> str:
# Handle words in {}
# First, we remove any \n in {} pairs
defi = self._re_newline_in_braces.sub(r"{\g<left>\g<right>}", defi)
		# Then, replace any {words} with <a href="bword://words">words</a>,
		# so they can be rendered as links correctly
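		# hedged example (illustrative input, not from a real .index/.dict
		# pair): "see {cat}" becomes: see <a href="bword://cat">cat</a>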
defi = self._re_words_in_braces.sub(
r'<a href="bword://\g<word>">\g<word></a>',
defi,
)
		# Use <br /> so newlines are rendered correctly
return defi.replace("\n", "<br />")
def __len__(self) -> int:
if self._dictdb is None:
return 0
return len(self._dictdb)
def __iter__(self) -> Iterator[EntryType]:
if self._dictdb is None:
raise RuntimeError("iterating over a reader while it's not open")
dictdb = self._dictdb
for word in dictdb.getDefList():
b_defi = b"\n\n<hr>\n\n".join(dictdb.getDef(word))
try:
defi = b_defi.decode("utf_8", "ignore")
defi = self.prettifyDefinitionText(defi)
except Exception as e:
log.error(f"{b_defi = }")
raise e
yield self._glos.newEntry(word, defi)
class Writer:
_dictzip: bool = False
_install: bool = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._dictdb: "DictDB | None" = None
def finish(self) -> None:
from pyglossary.os_utils import runDictzip
if self._dictdb is None:
raise RuntimeError("self._dictdb is None")
self._dictdb.finish(dosort=True)
if self._dictzip:
runDictzip(f"{self._filename}.dict")
if self._install:
installToDictd(
self._filename,
self._dictzip,
)
self._filename = ""
def open(self, filename: str) -> None:
filename_nox, ext = splitext(filename)
if ext.lower() == ".index":
filename = filename_nox
self._dictdb = DictDB(filename, "write", 1)
self._filename = filename
def write(self) -> Generator[None, EntryType, None]:
dictdb = self._dictdb
if dictdb is None:
raise RuntimeError("self._dictdb is None")
while True:
entry = yield
if entry is None:
break
if entry.isData():
# does dictd support resources? and how? FIXME
continue
dictdb.addEntry(entry.defi, entry.l_word)
| 5,036 | Python | .py | 162 | 28.271605 | 76 | 0.698823 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,822 | dicformids.py | ilius_pyglossary/pyglossary/plugins/dicformids.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
from __future__ import annotations
import operator
import os
import re
from collections.abc import Iterator
from os.path import join
from typing import TYPE_CHECKING
from pyglossary.core import log
from pyglossary.flags import ALWAYS
from pyglossary.plugins.tabfile import Reader as TabfileReader
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
lname = "dicformids"
enable = True
format = "Dicformids"
description = "DictionaryForMIDs"
extensions = (".mids",)
extensionCreate = ".mids/"
singleFile = False
sortOnWrite = ALWAYS
sortKeyName = "dicformids"
sortEncoding = "utf-8"
kind = "directory"
wiki = ""
website = (
"http://dictionarymid.sourceforge.net/",
"DictionaryForMIDs - SourceForge",
)
optionsProp: "dict[str, Option]" = {}
PROP_TEMPLATE = """#DictionaryForMIDs property file
infoText={name}, author: {author}
indexFileMaxSize={indexFileMaxSize}\n
language1IndexNumberOfSourceEntries={wordCount}
language1DictionaryUpdateClassName=de.kugihan.dictionaryformids.dictgen.DictionaryUpdate
indexCharEncoding=ISO-8859-1
dictionaryFileSeparationCharacter='\\t'
language2NormationClassName=de.kugihan.dictionaryformids.translation.Normation
language2DictionaryUpdateClassName=de.kugihan.dictionaryformids.dictgen.DictionaryUpdate
logLevel=0
language1FilePostfix={directoryPostfix}
dictionaryCharEncoding=UTF-8
numberOfAvailableLanguages=2
language1IsSearchable=true
language2GenerateIndex=false
dictionaryFileMaxSize={dicMaxSize}
language2FilePostfix={language2FilePostfix}
searchListFileMaxSize=20000
language2IsSearchable=false
fileEncodingFormat=plain_format1
language1HasSeparateDictionaryFile=true
searchListCharEncoding=ISO-8859-1
searchListFileSeparationCharacter='\t'
indexFileSeparationCharacter='\t'
language1DisplayText={sourceLang}
language2HasSeparateDictionaryFile=false
dictionaryGenerationInputCharEncoding=UTF-8
language1GenerateIndex=true
language2DisplayText={targetLang}
language1NormationClassName=de.kugihan.dictionaryformids.translation.NormationEng
"""
class Reader:
re_number = re.compile(r"\d+")
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._tabFileNames = []
self._tabFileReader = None
def open(self, dirname: str) -> None:
self._dirname = dirname
orderFileNames = []
for fname in os.listdir(dirname):
if not fname.startswith("directory"):
continue
try:
num = self.re_number.findall(fname)[-1]
except IndexError:
pass
else:
orderFileNames.append((num, fname))
orderFileNames.sort(
key=operator.itemgetter(0),
reverse=True,
)
self._tabFileNames = [x[1] for x in orderFileNames]
self.nextTabFile()
def __len__(self) -> int:
raise NotImplementedError # FIXME
def __iter__(self) -> Iterator[EntryType]:
return self
def __next__(self) -> EntryType:
for _ in range(10):
try:
return next(self._tabFileReader)
except StopIteration: # noqa: PERF203
self._tabFileReader.close()
self.nextTabFile()
return None
def nextTabFile(self) -> None:
try:
tabFileName = self._tabFileNames.pop()
except IndexError:
raise StopIteration from None
self._tabFileReader = TabfileReader(self._glos, hasInfo=False)
self._tabFileReader.open(join(self._dirname, tabFileName), newline="\n")
def close(self) -> None:
if self._tabFileReader:
try:
self._tabFileReader.close()
except Exception:
pass # noqa: S110
self._tabFileReader = None
self._tabFileNames = []
class Writer:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self.linesPerDirectoryFile = 500 # 200
self.indexFileMaxSize = 32722 # 30000
self.directoryPostfix = ""
self.indexPostfix = ""
self._dirname = ""
		# looks like we need to remove tabs, because the app gives an error
		# but based on the Java code, all punctuation should be removed
		# as well, including '|'
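		# hedged example of the intended normalization (illustrative only,
		# see normateWord below):
		#   "  Hallo,   Welt! "  ->  "hallo welt"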
self.re_punc = re.compile(
r"""[!"$§%&/()=?´`\\{}\[\]^°+*~#'\-_.:,;<>@|]*""", # noqa: RUF001
)
self.re_spaces = re.compile(" +")
self.re_tabs = re.compile("\t+")
def normateWord(self, word: str) -> str:
word = word.strip()
word = self.re_punc.sub("", word)
word = self.re_spaces.sub(" ", word)
word = self.re_tabs.sub(" ", word)
word = word.lower()
return word # noqa: RET504
def writeProbs(self) -> None:
glos = self._glos
probsPath = join(
self._dirname,
"DictionaryForMIDs.properties",
)
with open(probsPath, mode="w", newline="\n", encoding="utf-8") as fileObj:
fileObj.write(
PROP_TEMPLATE.format(
name=glos.getInfo("name"),
author=glos.author,
indexFileMaxSize=self.indexFileMaxSize,
wordCount=self.wordCount,
directoryPostfix=self.directoryPostfix,
dicMaxSize=self.dicMaxSize + 1,
language2FilePostfix="fa", # FIXME
sourceLang=glos.sourceLangName,
targetLang=glos.targetLangName,
),
)
def nextIndex(self) -> None:
try:
self.indexFp.close()
except AttributeError:
self.indexIndex = 0
self.indexIndex += 1
fname = f"index{self.indexPostfix}{self.indexIndex}.csv"
fpath = join(self._dirname, fname)
self.indexFp = open(fpath, mode="w", encoding="utf-8", newline="\n")
def finish(self) -> None:
pass
def open(self, dirname: str) -> None:
self._dirname = dirname
if not os.path.isdir(dirname):
os.mkdir(dirname)
def write(self) -> None:
self.nextIndex()
dicMaxSize = 0
indexData = []
def writeBucket(dicIndex: int, entryList: "list[EntryType]") -> None:
nonlocal dicMaxSize
log.debug(
f"{dicIndex=}, {len(entryList)=}, {dicMaxSize=}",
)
dicFp = open(
join(
self._dirname,
f"directory{self.directoryPostfix}{dicIndex + 1}.csv",
),
mode="w",
encoding="utf-8",
newline="\n",
)
for entry in entryList:
word = entry.s_word
n_word = self.normateWord(word)
defi = entry.defi
dicLine = word + "\t" + defi + "\n"
dicPos = dicFp.tell()
dicFp.write(dicLine)
indexData.append((n_word, dicIndex + 1, dicPos))
dicMaxSize = max(dicMaxSize, dicFp.tell())
dicFp.close()
bucketSize = self.linesPerDirectoryFile
wordCount = 0
dicIndex = 0
entryList = [] # aka bucket
while True:
entry = yield
if entry is None:
break
if entry.isData():
# FIXME
continue
wordCount += 1
entryList.append(entry)
if len(entryList) >= bucketSize:
writeBucket(dicIndex, entryList)
dicIndex += 1
entryList = []
if entryList:
writeBucket(dicIndex, entryList)
entryList = None
self.dicMaxSize = dicMaxSize
self.wordCount = wordCount
langSearchListFp = open(
join(
self._dirname,
f"searchlist{self.directoryPostfix}.csv",
),
mode="w",
newline="\n",
encoding="utf-8",
)
langSearchListFp.write(f"{indexData[0][0]}\t{self.indexIndex}\n")
for word, dicIndex, dicPos in indexData:
indexLine = f"{word}\t{dicIndex}-{dicPos}-B\n"
if (self.indexFp.tell() + len(indexLine)) > self.indexFileMaxSize - 10:
self.nextIndex()
langSearchListFp.write(f"{word}\t{self.indexIndex}\n")
self.indexFp.write(indexLine)
self.indexFp.close()
langSearchListFp.close()
self.writeProbs()
| 7,407 | Python | .py | 256 | 25.828125 | 88 | 0.730136 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,823 | testformat.py | ilius_pyglossary/pyglossary/plugins/testformat.py |
from __future__ import annotations
import typing
# -*- coding: utf-8 -*-
from typing import Generator, Iterator
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"enable",
"lname",
"format",
"description",
"extensions",
"extensionCreate",
"singleFile",
"kind",
"wiki",
"website",
"optionsProp",
"Reader",
"Writer",
]
enable = False
lname = "testformat"
format = "Test"
description = "Test Format File(.test)"
extensions = (".test", ".tst")
extensionCreate = ".test"
singleFile = True
kind = "text"
wiki = ""
website = None
# key is option/argument name, value is instance of Option
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._wordCount = 0
def __len__(self) -> int:
# return the number of entries if you have it
# if you don't, return 0 and progressbar will be disabled
# self._wordCount can be set in self.open function
# but if you want to set it, you should set it before
# iteration begins and __iter__ method is called
return self._wordCount
def open(self, filename: str) -> None:
# open the file, read headers / info and set info to self._glos
# and set self._wordCount if you can
# read-options should be keyword arguments in this method
self._wordCount = 100
# log.info(f"some useful message")
# here read info from file and set to Glossary object
self._glos.setInfo("name", "Test")
desc = "Test glossary created by a PyGlossary plugin"
self._glos.setInfo("description", desc)
self._glos.setInfo("author", "Me")
self._glos.setInfo("copyright", "GPL")
def close(self) -> None:
# this is called after reading/conversion is finished
# if you have an open file object, close it here
# if you need to clean up temp files, do it here
pass
def __iter__(self) -> Iterator[EntryType]:
		# the easiest way to implement an Iterator is to write a generator:
		# call yield glos.newEntry(word, defi) inside a loop
		# (typically while iterating over a file object for a text format)
		# another way (which is harder) is to implement a __next__ method
		# and return self from __iter__
		# that forces you to keep the state manually, because __next__ is
		# called repeatedly while __iter__ is only called once
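		# a minimal sketch of that harder approach (an assumed illustration,
		# not part of this plugin):
		#
		#   def __iter__(self):
		#       self._index = 0
		#       return self
		#
		#   def __next__(self):
		#       if self._index >= self._wordCount:
		#           raise StopIteration
		#       i = self._index
		#       self._index += 1
		#       return self._glos.newEntry(f"word_{i}", f"definition {i}")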
glos = self._glos
for i in range(self._wordCount):
			# here get word and definition from the file (depending on your format)
word = f"word_{i}"
defi = f"definition {i}"
yield glos.newEntry(word, defi)
class Writer:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
def open(self, filename: str) -> None:
self._filename = filename
def write(self) -> Generator[None, EntryType, None]:
glos = self._glos
filename = self._filename # noqa
# log.info(f"some useful message")
while True:
entry = yield
if entry is None:
break
if entry.isData():
# can save it with entry.save(directory)
continue
word = entry.s_word # noqa
defi = entry.defi # noqa
# here write word and defi to the output file (depending on
# your format)
		# here read info from Glossary object
name = glos.getInfo("name") # noqa
desc = glos.getInfo("description") # noqa
author = glos.author # noqa
copyright = glos.getInfo("copyright") # noqa
# if an info key doesn't exist, getInfo returns empty string
# now write info to the output file (depending on your output format)
def finish(self) -> None:
self._filename = ""
| 3,561 | Python | .py | 106 | 30.886792 | 73 | 0.707594 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,824 | almaany.py | ilius_pyglossary/pyglossary/plugins/almaany.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import html
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import sqlite3
from collections.abc import Iterator
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "almaany"
format = "Almaany"
description = "Almaany.com (SQLite3)"
extensions = ()
extensionCreate = ".db"
singleFile = True
kind = "binary"
wiki = ""
website = (
"https://play.google.com/store/apps/details?id=com.almaany.arar",
"Almaany.com Arabic Dictionary - Google Play",
)
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
self._con = connect(filename)
self._cur = self._con.cursor()
self._glos.setDefaultDefiFormat("h")
def __len__(self) -> int:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute("select count(*) from WordsTable")
return self._cur.fetchone()[0]
def __iter__(self) -> Iterator[EntryType]:
if self._cur is None:
raise ValueError("cur is None")
from pyglossary.langs.writing_system import getWritingSystemFromText
alternateDict: "dict[str, list[str]]" = {}
self._cur.execute("select wordkey, searchwordkey from Keys")
for row in self._cur.fetchall():
if row[0] in alternateDict:
alternateDict[row[0]].append(row[1])
else:
alternateDict[row[0]] = [row[1]]
self._cur.execute(
"select word, searchword, root, meaning from WordsTable order by id",
)
# FIXME: iteration over self._cur stops after one entry
# and self._cur.fetchone() returns None
# for row in self._cur:
for row in self._cur.fetchall():
word = row[0]
searchword = row[1]
root = row[2]
meaning = row[3]
definition = meaning
definition = definition.replace("|", "<br>")
if root:
definition += (
f'<br>Root: <a href="bword://{html.escape(root)}">{root}</a>'
)
ws = getWritingSystemFromText(meaning)
if ws and ws.direction == "rtl":
definition = f'<div dir="rtl">{definition}</div>'
words = [word, searchword]
if word in alternateDict:
words += alternateDict[word]
yield self._glos.newEntry(
words,
definition,
defiFormat="h",
)
def close(self) -> None:
if self._cur:
self._cur.close()
if self._con:
self._con.close()
self._clear()
| 2,790 | Python | .py | 101 | 24.673267 | 72 | 0.688132 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,825 | html_dir.py | ilius_pyglossary/pyglossary/plugins/html_dir.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import html
import os
import re
import time
from collections.abc import Generator
from functools import lru_cache
from os.path import isdir, isfile, join
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import (
EntryType,
GlossaryType,
)
from pyglossary.core import log
from pyglossary.option import (
BoolOption,
EncodingOption,
IntOption,
Option,
StrOption,
)
from pyglossary.text_utils import (
escapeNTB,
unescapeNTB,
)
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "html_dir"
format = "HtmlDir"
description = "HTML Directory"
extensions = (".hdir",)
extensionCreate = ".hdir/"
singleFile = False
kind = "directory"
wiki = ""
website = None
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"resources": BoolOption(
comment="Enable resources / data files",
),
"max_file_size": IntOption(
comment="Maximum file size in bytes",
),
"filename_format": StrOption(
comment="Filename format, default: {n:05d}.html",
),
"escape_defi": BoolOption(
comment="Escape definitions",
),
"dark": BoolOption(
comment="Use dark style",
),
"css": StrOption(
comment="Path to css file",
),
"word_title": BoolOption(
comment="Add headwords title to beginning of definition",
),
}
nbsp = "\xa0"
# nbsp = " "
darkStyle = """
body {{
background-color: #373737;
color: #eee;
}}
a {{ color: #aaaaff; }}
a.broken {{ color: #e0c0c0; }}
a.no_ul {{ text-decoration: none; }}
b.headword {{ font-size: 1.5em; color: #c7ffb9; }}
h1 {{ font-size: 1.5em; color: #c7ffb9;}}
h2 {{ font-size: 1.3em;}}
h3 {{ font-size: 1.0em;}}
h4 {{ font-size: 1.0em;}}
h5 {{ font-size: 1.0em;}}
h6 {{ font-size: 1.0em;}}
"""
class Writer:
_encoding: str = "utf-8"
_resources: bool = True
_max_file_size: int = 102400
_filename_format: str = "{n:05d}.html"
_escape_defi: bool = False
_dark: bool = True
_css: str = ""
_word_title: bool = True
@staticmethod
def stripFullHtmlError(entry: EntryType, error: str) -> None:
log.error(f"error in stripFullHtml: {error}, words={entry.l_word!r}")
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._fileObj: "io.IOBase | None" = None
self._encoding = "utf-8"
self._filename_format = "{n:05d}.html"
self._tail = "</body></html>"
self._filenameList: list[str] = []
glos.stripFullHtml(errorHandler=self.stripFullHtmlError)
self._resSrcPattern = re.compile(' src="([^"]*)"')
def open(self, filename: str) -> None:
self._filename = filename
self._resDir = resDir = join(filename, "res")
if not isdir(filename):
os.mkdir(filename)
if not isdir(resDir):
os.mkdir(resDir)
if self._css:
self.copyCSS(self._css)
def copyCSS(self, cssPath: str) -> None:
import shutil
shutil.copy(cssPath, join(self._filename, "style.css"))
def finish(self) -> None:
pass
def getNextFilename(self) -> str:
return self._filename_format.format(
n=len(self._filenameList),
)
def nextFile(self) -> io.TextIOBase:
if self._fileObj:
self._fileObj.write(self._tail)
self._fileObj.close()
filename = self.getNextFilename()
self._filenameList.append(filename)
self._fileObj = open(
join(
self._filename,
filename,
),
mode="w",
encoding=self._encoding,
)
return self._fileObj
def fixLinks(self, linkTargetSet: "set[str]") -> None: # noqa: PLR0912
import gc
gc.collect()
dirn = self._filename
filenameList = self._filenameList
fileByWord: "dict[str, list[tuple[str, int]]]" = {}
for line in open(join(dirn, "index.txt"), encoding="utf-8"):
line = line.rstrip("\n") # noqa: PLW2901
if not line:
continue
entryIndexStr, wordEsc, filename, _ = line.split("\t")
entryIndex = int(entryIndexStr)
# entryId = f"entry{entryIndex}"
word = unescapeNTB(wordEsc)
if word not in linkTargetSet:
continue
if word in fileByWord:
fileByWord[word].append((filename, entryIndex))
else:
fileByWord[word] = [(filename, entryIndex)]
# with open(join(dirn, "fileByWord.json"), "w") as fileByWordFile:
# json.dump(fileByWord, fileByWordFile, ensure_ascii=False, indent="\t")
@lru_cache(maxsize=10)
def getLinksByFile(fileIndex: int) -> io.TextIOBase:
return open(
join(dirn, f"links{fileIndex}"),
mode="a",
encoding="utf-8",
)
log.info("")
for line in open(join(dirn, "links.txt"), encoding="utf-8"):
line = line.rstrip("\n") # noqa: PLW2901
if not line:
continue
target, fileIndexStr, x_start, x_size = line.split("\t")
target = unescapeNTB(target)
if target not in fileByWord:
targetNew = ""
else:
targetFilename, targetEntryIndex = fileByWord[target][0]
if targetFilename == filename:
continue
targetNew = f"{targetFilename}#entry{targetEntryIndex}"
_file = getLinksByFile(int(fileIndexStr))
_file.write(
f"{x_start}\t{x_size}\t{targetNew}\n",
)
_file.flush()
linkTargetSet.clear()
del fileByWord, linkTargetSet
gc.collect()
if os.sep == "\\":
time.sleep(0.1)
entry_url_fmt = self._glos.getInfo("entry_url")
re_href = re.compile(
b' href="[^<>"]*?"',
re.IGNORECASE,
)
for fileIndex, filename in enumerate(filenameList):
if not isfile(join(dirn, f"links{fileIndex}")):
continue
with open(join(dirn, filename), mode="rb") as inFile:
with open(join(dirn, f"{filename}.new"), mode="wb") as outFile:
for linkLine in open(join(dirn, f"links{fileIndex}"), "rb"):
outFile.flush()
(
b_x_start,
b_x_size,
b_target,
) = linkLine.rstrip(b"\n").split(b"\t")
outFile.write(
inFile.read(
int(b_x_start, 16) - inFile.tell(),
),
)
curLink = inFile.read(int(b_x_size, 16))
if b_target:
outFile.write(
re_href.sub(
b' href="./' + b_target + b'"',
curLink,
),
)
continue
if not entry_url_fmt:
outFile.write(
curLink.replace(
b' href="#',
b' class="broken" href="#',
),
)
continue
_st = curLink.decode("utf-8")
i = _st.find('href="#')
j = _st.find('"', i + 7)
word = _st[i + 7 : j]
url = entry_url_fmt.format(word=word)
outFile.write(
(
_st[:i] + f'class="broken" href="{url}"' + _st[j + 1 :]
).encode("utf-8"),
)
outFile.write(inFile.read())
os.remove(join(dirn, filename))
os.rename(join(dirn, f"{filename}.new"), join(dirn, filename))
os.remove(join(dirn, f"links{fileIndex}"))
def writeInfo(self, filename: str, header: str) -> None:
glos = self._glos
title = glos.getInfo("name")
customStyle = (
"table, th, td {border: 1px solid black; "
"border-collapse: collapse; padding: 5px;}"
)
infoHeader = header.format(
pageTitle=f"Info: {title}",
customStyle=customStyle,
)
with open(
join(filename, "info.html"),
mode="w",
encoding=self._encoding,
) as _file:
_file.write(
infoHeader + "<table>"
"<tr>"
'<th width="%10">Key</th>'
'<th width="%90">Value</th>'
"</tr>\n",
)
for key, value in glos.iterInfo():
_file.write(
f"<tr><td>{key}</td><td>{value}</td></tr>\n",
)
_file.write("</table></body></html>")
@staticmethod
def _subResSrc(m: "re.Match") -> str:
url = m.group(1)
if "://" in url:
return m.group(0)
url = "res/" + url
return f' src="{url}"'
def write(self) -> Generator[None, EntryType, None]: # noqa: PLR0912
encoding = self._encoding
resources = self._resources
max_file_size = self._max_file_size
filename_format = self._filename_format
escape_defi = self._escape_defi
wordSep = ' <font color="red">|</font> '
initFileSizeMax = 100
glos = self._glos
filename = self._filename
self._encoding = encoding
self._filename_format = filename_format
entry_url_fmt = glos.getInfo("entry_url")
def getEntryWebLink(entry: EntryType) -> str:
if not entry_url_fmt:
return ""
url = entry_url_fmt.format(word=html.escape(entry.l_word[0]))
return f'{nbsp}<a class="no_ul" href="{url}">🌏</a>'
# from math import log2, ceil
# maxPosHexLen = int(ceil(log2(max_file_size) / 4))
indexTxtFileObj = open(
join(filename, "index.txt"),
mode="w",
encoding="utf-8",
)
linksTxtFileObj = open(
join(filename, "links.txt"),
mode="w",
encoding="utf-8",
)
title = glos.getInfo("name")
style = ""
if self._dark:
style = darkStyle
cssLink = '<link rel="stylesheet" href="style.css" />' if self._css else ""
header = (
"<!DOCTYPE html>\n"
"<html><head>"
"<title>{pageTitle}</title>"
f'<meta charset="{encoding}">'
f'<style type="text/css">{style}{{customStyle}}</style>{cssLink}'
"</meta></head><body>\n"
)
def pageHeader(n: int) -> str:
return header.format(
pageTitle=f"Page {n} of {title}",
customStyle="",
)
def navBar() -> str:
links = []
if len(self._filenameList) > 1:
links.append(f'<a href="./{self._filenameList[-2]}">◀</a>')
links.extend(
[
f'<a href="./{self.getNextFilename()}">▶</a>',
'<a href="./info.html">ℹ️</a></div>', # noqa: RUF001
],
)
return (
'<nav style="text-align: center; font-size: 2.5em;">'
+ f"{nbsp}{nbsp}{nbsp}".join(links)
+ "</nav>"
)
tailSize = len(self._tail.encode(encoding))
if max_file_size < len(header) + tailSize:
raise ValueError(f"{max_file_size=} is too small")
max_file_size -= tailSize
if not isdir(self._filename):
os.mkdir(self._filename)
fileObj = self.nextFile()
fileObj.write(pageHeader(0))
fileObj.write(navBar())
re_fixed_link = re.compile(
r'<a (?:[^<>]*? )?href="#([^<>"]+?)">[^<>]+?</a>',
re.IGNORECASE,
)
linkTargetSet = set()
def replaceBword(text: str) -> str:
return text.replace(
' href="bword://',
' href="#',
)
def addLinks(text: str, pos: int) -> None:
for m in re_fixed_link.finditer(text):
if ' class="entry_link"' in m.group(0):
continue
if m.group(0).count("href=") != 1:
log.error(f"unexpected match: {m.group(0)}")
target = html.unescape(m.group(1))
linkTargetSet.add(target)
start = m.start()
b_start = len(text[:start].encode(encoding))
b_size = len(text[start : m.end()].encode(encoding))
linksTxtFileObj.write(
f"{escapeNTB(target)}\t"
f"{len(self._filenameList) - 1}\t"
f"{pos + b_start:x}\t"
f"{b_size:x}\n",
)
linksTxtFileObj.flush()
self.writeInfo(filename, header)
_word_title = self._word_title
resDir = self._resDir
entryIndex = -1
while True:
entryIndex += 1
entry = yield
if entry is None:
break
if entry.isData():
if resources:
entry.save(resDir)
continue
entry.detectDefiFormat()
defi = entry.defi
defiFormat = entry.defiFormat
if defi.startswith("<!DOCTYPE html>") and defiFormat != "h":
log.error(f"bad {defiFormat=}")
defiFormat = "h"
if defiFormat == "m":
defi = html.escape(defi)
if "\n" in defi:
# could be markdown or unformatted plaintext
# FIXME: this changes the font to a monospace
defi = f"<pre>{defi}</pre>"
elif defiFormat == "h":
defi = self._resSrcPattern.sub(self._subResSrc, defi)
if escape_defi:
defi = html.escape(defi)
entryId = f"entry{entryIndex}"
if _word_title:
words = [html.escape(word) for word in entry.l_word]
title = glos.wordTitleStr(
wordSep.join(words),
sample=entry.l_word[0],
_class="headword",
)
if not title:
title = f"Entry {entryIndex}"
# entry_link_sym = "¶"
entry_link_sym = "🔗"
text = (
f'<div id="{entryId}">{title}{nbsp}{nbsp}'
f'<a class="no_ul" class="entry_link" href="#{entryId}">'
f"{entry_link_sym}</a>"
f"{getEntryWebLink(entry)}"
f"<br>\n{defi}"
"</div>\n"
"<hr>\n"
)
pos = fileObj.tell()
if pos > initFileSizeMax and pos > max_file_size - len(
text.encode(encoding),
):
fileObj = self.nextFile()
fileObj.write(
pageHeader(
len(self._filenameList) - 1,
),
)
fileObj.write(navBar())
pos = fileObj.tell()
tmpFilename = escapeNTB(self._filenameList[-1])
for word in entry.l_word:
indexTxtFileObj.write(
f"{entryIndex}\t"
f"{escapeNTB(word)}\t"
f"{tmpFilename}\t"
f"{pos}\n",
)
del tmpFilename
text = replaceBword(text)
addLinks(text, pos)
fileObj.write(text)
fileObj.close()
self._fileObj = None
indexTxtFileObj.close()
linksTxtFileObj.close()
if linkTargetSet:
log.info(f"{len(linkTargetSet)} link targets found")
log.info("Fixing links, please wait...")
self.fixLinks(linkTargetSet)
os.remove(join(filename, "links.txt"))
| 12,985 | Python | .py | 473 | 23.496829 | 77 | 0.638382 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,826 | sql.py | ilius_pyglossary/pyglossary/plugins/sql.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Generator
from typing import TYPE_CHECKING
from pyglossary.option import (
BoolOption,
EncodingOption,
ListOption,
NewlineOption,
Option,
)
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "sql"
format = "Sql"
description = "SQL (.sql)"
extensions = (".sql",)
extensionCreate = ".sql"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/SQL"
website = None
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"info_keys": ListOption(comment="List of dbinfo table columns"),
"add_extra_info": BoolOption(comment="Create dbinfo_extra table"),
"newline": NewlineOption(),
"transaction": BoolOption(comment="Use TRANSACTION"),
}
class Writer:
_encoding: str = "utf-8"
_info_keys: "list | None" = None
_add_extra_info: bool = True
_newline: str = "<br>"
_transaction: bool = False
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: "io.IOBase | None" = None
def finish(self) -> None:
self._filename = ""
if self._file:
self._file.close()
self._file = None
def open(self, filename: str) -> None:
self._filename = filename
self._file = open(filename, "w", encoding=self._encoding)
self._writeInfo()
def _writeInfo(self) -> None:
fileObj = self._file
if fileObj is None:
raise ValueError("fileObj is None")
newline = self._newline
info_keys = self._getInfoKeys()
infoDefLine = "CREATE TABLE dbinfo ("
infoValues = []
glos = self._glos
for key in info_keys:
value = glos.getInfo(key)
value = (
value.replace("'", "''")
.replace("\x00", "")
.replace("\r", "")
.replace("\n", newline)
)
infoValues.append(f"'{value}'")
infoDefLine += f"{key} char({len(value)}), "
infoDefLine = infoDefLine[:-2] + ");"
fileObj.write(infoDefLine + "\n")
if self._add_extra_info:
fileObj.write(
"CREATE TABLE dbinfo_extra ("
"'id' INTEGER PRIMARY KEY NOT NULL, "
"'name' TEXT UNIQUE, 'value' TEXT);\n",
)
fileObj.write(
"CREATE TABLE word ('id' INTEGER PRIMARY KEY NOT NULL, "
"'w' TEXT, 'm' TEXT);\n",
)
fileObj.write(
"CREATE TABLE alt ('id' INTEGER NOT NULL, 'w' TEXT);\n",
)
if self._transaction:
fileObj.write("BEGIN TRANSACTION;\n")
fileObj.write(f"INSERT INTO dbinfo VALUES({','.join(infoValues)});\n")
if self._add_extra_info:
extraInfo = glos.getExtraInfos(info_keys)
for index, (key, value) in enumerate(extraInfo.items()):
key2 = key.replace("'", "''")
value2 = value.replace("'", "''")
fileObj.write(
f"INSERT INTO dbinfo_extra VALUES({index + 1}, "
f"'{key2}', '{value2}');\n",
)
def _getInfoKeys(self) -> list[str]:
info_keys = self._info_keys
if info_keys:
return info_keys
return [
"dbname",
"author",
"version",
"direction",
"origLang",
"destLang",
"license",
"category",
"description",
]
def write(self) -> Generator[None, EntryType, None]:
newline = self._newline
fileObj = self._file
if fileObj is None:
raise ValueError("fileObj is None")
def fixStr(word: str) -> str:
return word.replace("'", "''").replace("\r", "").replace("\n", newline)
_id = 1
while True:
entry = yield
if entry is None:
break
if entry.isData():
# FIXME
continue
words = entry.l_word
word = fixStr(words[0])
defi = fixStr(entry.defi)
fileObj.write(
f"INSERT INTO word VALUES({_id}, '{word}', '{defi}');\n",
)
for alt in words[1:]:
fileObj.write(
f"INSERT INTO alt VALUES({_id}, '{fixStr(alt)}');\n",
)
_id += 1
if self._transaction:
fileObj.write("END TRANSACTION;\n")
fileObj.write("CREATE INDEX ix_word_w ON word(w COLLATE NOCASE);\n")
fileObj.write("CREATE INDEX ix_alt_id ON alt(id COLLATE NOCASE);\n")
fileObj.write("CREATE INDEX ix_alt_w ON alt(w COLLATE NOCASE);\n")
| 4,164 | Python | .py | 156 | 23.512821 | 74 | 0.65654 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,827 | zimfile.py | ilius_pyglossary/pyglossary/plugins/zimfile.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
from collections.abc import Iterator
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from libzim.reader import Archive # type: ignore
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
from pyglossary.core import cacheDir, exc_note, log, pip
from pyglossary.option import UnicodeErrorsOption
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "zim"
format = "Zim"
description = "Zim (.zim, for Kiwix)"
extensions = (".zim",)
extensionCreate = ".zim"
singleFile = True
kind = "binary"
wiki = "https://en.wikipedia.org/wiki/ZIM_(file_format)"
website = (
"https://wiki.openzim.org/wiki/OpenZIM",
"OpenZIM",
)
optionsProp: "dict[str, Option]" = {
"text_unicode_errors": UnicodeErrorsOption(
comment="Unicode Errors for plaintext, values: `strict`, `ignore`, `replace`",
),
"html_unicode_errors": UnicodeErrorsOption(
comment="Unicode Errors for HTML, values: `strict`, `ignore`, `replace`",
),
}
# https://wiki.kiwix.org/wiki/Software
# to download zim files:
# https://archive.org/details/zimarchive
# https://dumps.wikimedia.org/other/kiwix/zim/
# I can't find any way to download zim files from https://library.kiwix.org/
# which wiki.openzim.org points at for downloading zim files
class Reader:
_text_unicode_errors = "strict"
_html_unicode_errors = "strict"
depends = {
"libzim": "libzim>=1.0",
}
resourceMimeTypes = {
"image/png",
"image/jpeg",
"image/gif",
"image/svg+xml",
"image/webp",
"image/x-icon",
"text/css",
"text/javascript",
"application/javascript",
"application/json",
"application/octet-stream",
"application/octet-stream+xapian",
"application/x-chrome-extension",
"application/warc-headers",
"application/font-woff",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._zimfile: "Archive | None" = None
def open(self, filename: str) -> None:
try:
from libzim.reader import Archive
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install libzim` to install")
raise
self._filename = filename
self._zimfile = Archive(filename)
def close(self) -> None:
self._filename = ""
self._zimfile = None
def __len__(self) -> int:
if self._zimfile is None:
log.error("len(reader) called before reader.open()")
return 0
return self._zimfile.entry_count
def __iter__(self) -> Iterator[EntryType | None]: # noqa: PLR0912
glos = self._glos
zimfile = self._zimfile
if zimfile is None:
return
emptyContentCount = 0
invalidMimeTypeCount = 0
undefinedMimeTypeCount = 0
entryCount = zimfile.entry_count
redirectCount = 0
windows = os.sep == "\\"
try:
f_namemax = os.statvfs(cacheDir).f_namemax # type: ignore
except AttributeError:
log.warning("Unsupported operating system (no os.statvfs)")
# Windows: CreateFileA has a limit of 260 characters.
# CreateFileW supports names up to about 32760 characters (64kB).
f_namemax = 200
fileNameTooLong = []
text_unicode_errors = self._text_unicode_errors
html_unicode_errors = self._html_unicode_errors
for entryIndex in range(entryCount):
zEntry = zimfile._get_entry_by_id(entryIndex)
word = zEntry.title
if zEntry.is_redirect:
redirectCount += 1
targetWord = zEntry.get_redirect_entry().title
yield glos.newEntry(
word,
f'Redirect: <a href="bword://{targetWord}">{targetWord}</a>',
defiFormat="h",
)
continue
zItem = zEntry.get_item()
b_content = zItem.content.tobytes()
if not b_content:
emptyContentCount += 1
yield None
# TODO: test with more zim files
# Looks like: zItem.path == zEntry.path == "-" + word
# print(f"b_content empty, {word=}, {zEntry.path=}, {zItem.path=}")
# if zEntry.path == "-" + word:
# yield None
# else:
# defi = f"Path: {zEntry.path}"
# yield glos.newEntry(word, defi, defiFormat="m")
continue
try:
mimetype = zItem.mimetype
except RuntimeError:
invalidMimeTypeCount += 1
yield glos.newDataEntry(word, b_content)
if mimetype == "undefined":
undefinedMimeTypeCount += 1
continue
mimetype = mimetype.split(";")[0]
if mimetype.startswith("text/html"):
# can be "text/html;raw=true"
defi = b_content.decode("utf-8", errors=html_unicode_errors)
defi = defi.replace(' src="../I/', ' src="./')
yield glos.newEntry(word, defi, defiFormat="h")
continue
if mimetype == "text/plain":
yield glos.newEntry(
word,
b_content.decode("utf-8", errors=text_unicode_errors),
defiFormat="m",
)
continue
if mimetype not in self.resourceMimeTypes:
log.warning(f"Unrecognized {mimetype=}")
if len(word) > f_namemax:
fileNameTooLong.append(word)
continue
if "|" in word:
log.warning(f"resource title: {word}")
if windows:
continue
try:
entry = glos.newDataEntry(word, b_content)
except Exception as e:
log.error(f"error creating file: {e}")
continue
yield entry
log.info(f"ZIM Entry Count: {entryCount}")
		if fileNameTooLong:
log.error(f"Files with name too long: {len(fileNameTooLong)}")
if emptyContentCount > 0:
log.info(f"Empty Content Count: {emptyContentCount}")
if invalidMimeTypeCount > 0:
log.info(f"Invalid MIME-Type Count: {invalidMimeTypeCount}")
if undefinedMimeTypeCount > 0:
			log.info(f"MIME-Type 'undefined' Count: {undefinedMimeTypeCount}")
if redirectCount > 0:
log.info(f"Redirect Count: {redirectCount}")
| 5,728 | Python | .py | 190 | 26.742105 | 80 | 0.700073 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,828 | dict_org_source.py | ilius_pyglossary/pyglossary/plugins/dict_org_source.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Generator
from typing import TYPE_CHECKING
from pyglossary.option import BoolOption, Option
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dict_org_source"
format = "DictOrgSource"
description = "DICT.org dictfmt source file"
extensions = (".dtxt",)
extensionCreate = ".dtxt"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/DICT"
website = (
"https://github.com/cheusov/dictd",
"@cheusov/dictd",
)
optionsProp: "dict[str, Option]" = {
"remove_html_all": BoolOption(comment="Remove all HTML tags"),
}
class Writer:
_remove_html_all: bool = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
def finish(self) -> None:
self._filename = ""
def open(self, filename: str) -> None:
self._filename = filename
if self._remove_html_all:
self._glos.removeHtmlTagsAll()
# TODO: add another bool flag to only remove html tags that are not
# supported by GtkTextView
@staticmethod
def _defiEscapeFunc(defi: str) -> str:
return defi.replace("\r", "")
def write(self) -> Generator[None, EntryType, None]:
from pyglossary.text_writer import writeTxt
yield from writeTxt(
self._glos,
entryFmt=":{word}:{defi}\n",
filename=self._filename,
defiEscapeFunc=self._defiEscapeFunc,
ext=".dtxt",
)
| 1,610 | Python | .py | 62 | 23.741935 | 69 | 0.719218 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

6,829 | stardict.py | ilius_pyglossary/pyglossary/plugins/stardict.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import gzip
import os
import re
import typing
from collections import Counter
from collections.abc import Callable, Generator, Iterator, Sequence
from os.path import (
dirname,
getsize,
isdir,
isfile,
join,
realpath,
split,
splitext,
)
from pprint import pformat
from time import perf_counter as now
from typing import (
TYPE_CHECKING,
Any,
Literal,
Protocol,
TypeVar,
)
if TYPE_CHECKING:
import io
import sqlite3
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.langs import Lang
from pyglossary.core import log
from pyglossary.flags import ALWAYS, DEFAULT_YES
from pyglossary.glossary_utils import Error
from pyglossary.option import (
BoolOption,
Option,
StrOption,
)
from pyglossary.text_utils import (
uint32FromBytes,
uint32ToBytes,
uint64FromBytes,
uint64ToBytes,
)
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "stardict"
format = "Stardict"
description = "StarDict (.ifo)"
extensions = (".ifo",)
extensionCreate = "-stardict/"
singleFile = False
sortOnWrite = ALWAYS
sortKeyName = "stardict"
sortEncoding = "utf-8"
kind = "directory"
wiki = "https://en.wikipedia.org/wiki/StarDict"
website = (
"http://huzheng.org/stardict/",
"huzheng.org/stardict",
)
# https://github.com/huzheng001/stardict-3/blob/master/dict/doc/StarDictFileFormat
optionsProp: "dict[str, Option]" = {
"large_file": BoolOption(
comment="Use idxoffsetbits=64 bits, for large files only",
),
"stardict_client": BoolOption(
comment="Modify html entries for StarDict 3.0",
),
"dictzip": BoolOption(
comment="Compress .dict file to .dict.dz",
),
"sametypesequence": StrOption(
values=["", "h", "m", "x", None],
comment="Definition format: h=html, m=plaintext, x=xdxf",
),
"merge_syns": BoolOption(
comment="Write alternates to .idx instead of .syn",
),
"xdxf_to_html": BoolOption(
comment="Convert XDXF entries to HTML",
),
"xsl": BoolOption(
comment="Use XSL transformation",
),
"unicode_errors": StrOption(
values=[
"strict", # raise a UnicodeDecodeError exception
"ignore", # just leave the character out
"replace", # use U+FFFD, REPLACEMENT CHARACTER
"backslashreplace", # insert a \xNN escape sequence
],
comment="What to do with Unicode decoding errors",
),
"audio_goldendict": BoolOption(
comment="Convert audio links for GoldenDict (desktop)",
),
"audio_icon": BoolOption(
comment="Add glossary's audio icon",
),
"sqlite": BoolOption(
comment="Use SQLite to limit memory usage",
),
}
if os.getenv("PYGLOSSARY_STARDICT_NO_FORCE_SORT") == "1":
sortOnWrite = DEFAULT_YES
infoKeys = (
"bookname",
"author",
"email",
"website",
"description",
"date",
)
# re_newline = re.compile("[\n\r]+")
re_newline = re.compile("\n\r?|\r\n?")
def newlinesToSpace(text: str) -> str:
return re_newline.sub(" ", text)
def newlinesToBr(text: str) -> str:
return re_newline.sub("<br>", text)
def verifySameTypeSequence(s: str) -> bool:
if not s:
return True
# maybe should just check it's in ("h", "m", "x")
if not s.isalpha():
return False
return len(s) == 1
class XdxfTransformerType(Protocol):
def transformByInnerString(self, text: str) -> str: ...
T_SDListItem_contra = TypeVar("T_SDListItem_contra", contravariant=True)
class T_SdList(Protocol[T_SDListItem_contra]):
def append(self, x: T_SDListItem_contra) -> None: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[Any]: ...
def sort(self) -> None: ...
class MemSdList:
def __init__(self) -> None:
self._l: list[Any] = []
def append(self, x: Any) -> None:
self._l.append(x)
def __len__(self) -> int:
return len(self._l)
def __iter__(self) -> Iterator[Any]:
return iter(self._l)
def sortKey(self, item: "tuple[bytes, Any]") -> tuple[bytes, bytes]: # noqa: PLR6301
return (
item[0].lower(),
item[0],
)
def sort(self) -> None:
self._l.sort(key=self.sortKey)
class BaseSqList:
def __init__(
self,
filename: str,
) -> None:
from sqlite3 import connect
if isfile(filename):
log.warning(f"Renaming {filename} to {filename}.bak")
			os.rename(filename, filename + ".bak")
self._filename = filename
self._con: "sqlite3.Connection | None" = connect(filename)
self._cur: "sqlite3.Cursor | None" = self._con.cursor()
if not filename:
raise ValueError(f"invalid {filename=}")
self._orderBy = "word_lower, word"
self._sorted = False
self._len = 0
columns = self._columns = [
("word_lower", "TEXT"),
("word", "TEXT"),
] + self.getExtraColumns()
self._columnNames = ",".join(col[0] for col in columns)
colDefs = ",".join(f"{col[0]} {col[1]}" for col in columns)
self._con.execute(
f"CREATE TABLE data ({colDefs})",
)
self._con.execute(
f"CREATE INDEX sortkey ON data({self._orderBy});",
)
self._con.commit()
@classmethod
def getExtraColumns(cls) -> list[tuple[str, str]]:
# list[(columnName, dataType)]
return []
def __len__(self) -> int:
return self._len
def append(self, item: Sequence) -> None:
if self._cur is None or self._con is None:
raise RuntimeError("db is closed")
self._len += 1
extraN = len(self._columns) - 1
self._cur.execute(
f"insert into data({self._columnNames}) values (?{', ?' * extraN})",
[item[0].lower()] + list(item),
)
if self._len % 1000 == 0:
self._con.commit()
def sort(self) -> None:
pass
def close(self) -> None:
if self._cur is None or self._con is None:
return
self._con.commit()
self._cur.close()
self._con.close()
self._con = None
self._cur = None
def __del__(self) -> None:
try:
self.close()
except AttributeError as e:
log.error(str(e))
def __iter__(self) -> Iterator[EntryType]:
if self._cur is None:
raise RuntimeError("db is closed")
query = f"SELECT * FROM data ORDER BY {self._orderBy}"
self._cur.execute(query)
for row in self._cur:
yield row[1:]
class IdxSqList(BaseSqList):
@classmethod
def getExtraColumns(cls) -> list[tuple[str, str]]:
# list[(columnName, dataType)]
return [
("idx_block", "BLOB"),
]
class SynSqList(BaseSqList):
@classmethod
def getExtraColumns(cls) -> list[tuple[str, str]]:
# list[(columnName, dataType)]
return [
("entry_index", "INTEGER"),
]
class Reader:
_xdxf_to_html: bool = True
_xsl: bool = False
_unicode_errors: str = "strict"
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self.clear()
self._xdxfTr: "XdxfTransformerType | None" = None
self._large_file = False
"""
indexData format
indexData[i] - i-th record in index file,
a tuple (previously a list) of length 3
indexData[i][0] - b_word (bytes)
indexData[i][1] - definition block offset in dict file (int)
indexData[i][2] - definition block size in dict file (int)
REMOVED:
indexData[i][3] - list of definitions
indexData[i][3][j][0] - definition data
indexData[i][3][j][1] - definition type - "h", "m" or "x"
indexData[i][4] - list of synonyms (strings)
synDict:
a dict { entryIndex -> altList }
"""
def xdxf_setup(self) -> XdxfTransformerType:
if self._xsl:
from pyglossary.xdxf.xsl_transform import XslXdxfTransformer
return XslXdxfTransformer(encoding="utf-8")
from pyglossary.xdxf.transform import XdxfTransformer
return XdxfTransformer(encoding="utf-8")
def xdxf_transform(self, text: str) -> str:
if self._xdxfTr is None:
self._xdxfTr = self.xdxf_setup()
return self._xdxfTr.transformByInnerString(text)
def close(self) -> None:
if self._dictFile:
self._dictFile.close()
self.clear()
def clear(self) -> None:
self._dictFile: "io.IOBase | None" = None
self._filename = "" # base file path, no extension
self._indexData: "list[tuple[bytes, int, int]]" = []
self._synDict: "dict[int, list[str]]" = {}
self._sametypesequence = ""
self._resDir = ""
self._resFileNames: list[str] = []
self._wordCount: "int | None" = None
def open(self, filename: str) -> None:
if splitext(filename)[1].lower() == ".ifo":
filename = splitext(filename)[0]
elif isdir(filename):
filename = join(filename, filename)
self._filename = filename
self._filename = realpath(self._filename)
self.readIfoFile()
sametypesequence = self._glos.getInfo("sametypesequence")
if not verifySameTypeSequence(sametypesequence):
raise LookupError(f"Invalid {sametypesequence = }")
self._indexData = self.readIdxFile()
self._wordCount = len(self._indexData)
self._synDict = self.readSynFile()
self._sametypesequence = sametypesequence
if isfile(self._filename + ".dict.dz"):
self._dictFile = gzip.open(self._filename + ".dict.dz", mode="rb")
else:
self._dictFile = open(self._filename + ".dict", mode="rb")
self._resDir = join(dirname(self._filename), "res")
if isdir(self._resDir):
self._resFileNames = os.listdir(self._resDir)
else:
self._resDir = ""
self._resFileNames = []
# self.readResources()
def __len__(self) -> int:
if self._wordCount is None:
raise RuntimeError(
"StarDict: len(reader) called while reader is not open",
)
return self._wordCount + len(self._resFileNames)
def readIfoFile(self) -> None:
""".ifo file is a text file in utf-8 encoding."""
with open(
self._filename + ".ifo",
mode="rb",
) as ifoFile:
for line in ifoFile:
line = line.strip() # noqa: PLW2901
if not line:
continue
if line == b"StarDict's dict ifo file":
continue
b_key, _, b_value = line.partition(b"=")
if not (b_key and b_value):
continue
try:
key = b_key.decode("utf-8")
value = b_value.decode("utf-8", errors=self._unicode_errors)
except UnicodeDecodeError:
log.error(f"ifo line is not UTF-8: {line!r}")
continue
self._glos.setInfo(key, value)
idxoffsetbits = self._glos.getInfo("idxoffsetbits")
if idxoffsetbits:
if idxoffsetbits == "32":
self._large_file = False
elif idxoffsetbits == "64":
self._large_file = True
else:
raise ValueError(f"invalid {idxoffsetbits = }")
def readIdxFile(self) -> list[tuple[bytes, int, int]]:
if isfile(self._filename + ".idx.gz"):
with gzip.open(self._filename + ".idx.gz") as g_file:
idxBytes = g_file.read()
else:
with open(self._filename + ".idx", "rb") as _file:
idxBytes = _file.read()
indexData = []
pos = 0
if self._large_file:
def getOffset() -> tuple[int, int]:
return uint64FromBytes(idxBytes[pos : pos + 8]), pos + 8
else:
def getOffset() -> tuple[int, int]:
return uint32FromBytes(idxBytes[pos : pos + 4]), pos + 4
while pos < len(idxBytes):
beg = pos
pos = idxBytes.find(b"\x00", beg)
if pos < 0:
log.error("Index file is corrupted")
break
b_word = idxBytes[beg:pos]
pos += 1
if pos + 8 > len(idxBytes):
log.error("Index file is corrupted")
break
offset, pos = getOffset()
size = uint32FromBytes(idxBytes[pos : pos + 4])
pos += 4
indexData.append((b_word, offset, size))
return indexData
def decodeRawDefiPart(
self,
b_defiPart: bytes,
i_type: int,
unicode_errors: str,
) -> tuple[str, str]:
_type = chr(i_type)
"""
_type: 'r'
https://github.com/huzheng001/stardict-3/blob/master/dict/doc/StarDictFileFormat#L431
Resource file list.
The content can be:
img:pic/example.jpg // Image file
snd:apple.wav // Sound file
vdo:film.avi // Video file
att:file.bin // Attachment file
More than one line is supported as a list of available files.
StarDict will find the files in the Resource Storage.
The image will be shown, the sound file will have a play button.
You can "save as" the attachment file and so on.
The file list must be a utf-8 string ending with '\0'.
Use '\n' for separating new lines.
Use '/' character as directory separator.
"""
_format = {
"m": "m",
"t": "m",
"y": "m",
"g": "h",
"h": "h",
"x": "x",
}.get(_type, "")
if not _format:
log.warning(f"Definition type {_type!r} is not supported")
_defi = b_defiPart.decode("utf-8", errors=unicode_errors)
# log.info(f"{_type}->{_format}: {_defi}".replace("\n", "")[:120])
if _format == "x" and self._xdxf_to_html:
_defi = self.xdxf_transform(_defi)
_format = "h"
return _format, _defi
def renderRawDefiList(
self,
rawDefiList: "list[tuple[bytes, int]]",
unicode_errors: str,
) -> tuple[str, str]:
if len(rawDefiList) == 1:
b_defiPart, i_type = rawDefiList[0]
_format, _defi = self.decodeRawDefiPart(
b_defiPart=b_defiPart,
i_type=i_type,
unicode_errors=unicode_errors,
)
return _defi, _format
defiFormatSet = set()
defisWithFormat = []
for b_defiPart, i_type in rawDefiList:
_format, _defi = self.decodeRawDefiPart(
b_defiPart=b_defiPart,
i_type=i_type,
unicode_errors=unicode_errors,
)
defisWithFormat.append((_defi, _format))
defiFormatSet.add(_format)
if len(defiFormatSet) == 1:
defis = [_defi for _defi, _ in defisWithFormat]
_format = defiFormatSet.pop()
if _format == "h":
return "\n<hr>".join(defis), _format
return "\n".join(defis), _format
if not defiFormatSet:
log.error(f"empty defiFormatSet, {rawDefiList=}")
return "", ""
# convert plaintext or xdxf to html
defis = []
for _defi, _format in defisWithFormat:
if _format == "m":
_defi = _defi.replace("\n", "<br/>")
_defi = f"<pre>{_defi}</pre>"
elif _format == "x":
_defi = self.xdxf_transform(_defi)
defis.append(_defi)
return "\n<hr>\n".join(defis), "h"
def __iter__(self) -> Iterator[EntryType]: # noqa: PLR0912
indexData = self._indexData
synDict = self._synDict
sametypesequence = self._sametypesequence
dictFile = self._dictFile
unicode_errors = self._unicode_errors
if not dictFile:
raise RuntimeError("iterating over a reader while it's not open")
if not indexData:
log.warning("indexData is empty")
return
for entryIndex, (b_word, defiOffset, defiSize) in enumerate(indexData):
if not b_word:
continue
dictFile.seek(defiOffset)
if dictFile.tell() != defiOffset:
log.error(f"Unable to read definition for word {b_word!r}")
continue
b_defiBlock = dictFile.read(defiSize)
if len(b_defiBlock) != defiSize:
log.error(f"Unable to read definition for word {b_word!r}")
continue
if sametypesequence:
rawDefiList = self.parseDefiBlockCompact(
b_defiBlock,
sametypesequence,
)
else:
rawDefiList = self.parseDefiBlockGeneral(b_defiBlock)
if rawDefiList is None:
log.error(f"Data file is corrupted. Word {b_word!r}")
continue
word: "str | list[str]"
word = b_word.decode("utf-8", errors=unicode_errors)
try:
alts = synDict[entryIndex]
except KeyError: # synDict is dict
pass
else:
word = [word] + alts
defi, defiFormat = self.renderRawDefiList(
rawDefiList,
unicode_errors,
)
# FIXME:
# defi = defi.replace(' src="./res/', ' src="./')
yield self._glos.newEntry(word, defi, defiFormat=defiFormat)
if isdir(self._resDir):
for fname in os.listdir(self._resDir):
fpath = join(self._resDir, fname)
with open(fpath, "rb") as _file:
yield self._glos.newDataEntry(
fname,
_file.read(),
)
def readSynFile(self) -> dict[int, list[str]]:
"""Return synDict, a dict { entryIndex -> altList }."""
if self._wordCount is None:
raise RuntimeError("self._wordCount is None")
unicode_errors = self._unicode_errors
synBytes = b""
if isfile(self._filename + ".syn"):
with open(self._filename + ".syn", mode="rb") as _file:
synBytes = _file.read()
elif isfile(self._filename + ".syn.dz"):
with gzip.open(self._filename + ".syn.dz", mode="rb") as _zfile:
synBytes = _zfile.read()
else:
return {}
synBytesLen = len(synBytes)
synDict: "dict[int, list[str]]" = {}
pos = 0
while pos < synBytesLen:
beg = pos
pos = synBytes.find(b"\x00", beg)
if pos < 0:
log.error("Synonym file is corrupted")
break
b_alt = synBytes[beg:pos] # b_alt is bytes
pos += 1
if pos + 4 > len(synBytes):
log.error("Synonym file is corrupted")
break
entryIndex = uint32FromBytes(synBytes[pos : pos + 4])
pos += 4
if entryIndex >= self._wordCount:
log.error(
"Corrupted synonym file. "
f"Word {b_alt!r} references invalid item",
)
continue
s_alt = b_alt.decode("utf-8", errors=unicode_errors)
# s_alt is str
try:
synDict[entryIndex].append(s_alt)
except KeyError:
synDict[entryIndex] = [s_alt]
return synDict
@staticmethod
def parseDefiBlockCompact(
b_block: bytes,
sametypesequence: str,
) -> list[tuple[bytes, int]] | None:
"""
Parse definition block when sametypesequence option is specified.
Return a list of (b_defi, defiFormatCode) tuples
where b_defi is a bytes instance
and defiFormatCode is int, so: defiFormat = chr(defiFormatCode)
"""
b_sametypesequence = sametypesequence.encode("utf-8")
if not b_sametypesequence:
raise ValueError(f"{b_sametypesequence = }")
res = []
i = 0
for t in b_sametypesequence[:-1]:
if i >= len(b_block):
return None
if bytes([t]).islower():
beg = i
i = b_block.find(b"\x00", beg)
if i < 0:
return None
res.append((b_block[beg:i], t))
i += 1
else:
# assert bytes([t]).isupper()
if i + 4 > len(b_block):
return None
size = uint32FromBytes(b_block[i : i + 4])
i += 4
if i + size > len(b_block):
return None
res.append((b_block[i : i + size], t))
i += size
if i >= len(b_block):
return None
t = b_sametypesequence[-1]
if bytes([t]).islower():
if 0 in b_block[i:]:
return None
res.append((b_block[i:], t))
else:
# assert bytes([t]).isupper()
res.append((b_block[i:], t))
return res
@staticmethod
def parseDefiBlockGeneral(
b_block: bytes,
) -> list[tuple[bytes, int]] | None:
"""
Parse definition block when sametypesequence option is not specified.
Return a list of (b_defi, defiFormatCode) tuples
where b_defi is a bytes instance
and defiFormatCode is int, so: defiFormat = chr(defiFormatCode)
"""
res = []
i = 0
while i < len(b_block):
t = b_block[i]
if not bytes([t]).isalpha():
return None
i += 1
if bytes([t]).islower():
beg = i
i = b_block.find(b"\x00", beg)
if i < 0:
return None
res.append((b_block[beg:i], t))
i += 1
else:
# assert bytes([t]).isupper()
if i + 4 > len(b_block):
return None
size = uint32FromBytes(b_block[i : i + 4])
i += 4
if i + size > len(b_block):
return None
res.append((b_block[i : i + size], t))
i += size
return res
# def readResources(self):
# if not isdir(self._resDir):
# resInfoPath = join(baseDirPath, "res.rifo")
# if isfile(resInfoPath):
# log.warning(
# "StarDict resource database is not supported. Skipping"
# )
class Writer:
_large_file: bool = False
_dictzip: bool = True
_sametypesequence: "Literal['', 'h', 'm', 'x'] | None" = ""
_stardict_client: bool = False
_merge_syns: bool = False
_audio_goldendict: bool = False
_audio_icon: bool = True
_sqlite: bool = False
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._resDir = ""
self._sourceLang: "Lang | None" = None
self._targetLang: "Lang | None" = None
self._p_pattern = re.compile(
"<p( [^<>]*?)?>(.*?)</p>",
re.DOTALL,
)
self._br_pattern = re.compile(
"<br[ /]*>",
re.IGNORECASE,
)
self._re_audio_link = re.compile(
'<a (type="sound" )?([^<>]*? )?href="sound://([^<>"]+)"( .*?)?>(.*?)</a>',
)
def finish(self) -> None:
self._filename = ""
self._resDir = ""
self._sourceLang = None
self._targetLang = None
def open(self, filename: str) -> None:
log.debug(f"open: {filename = }")
fileBasePath = filename
##
if splitext(filename)[1].lower() == ".ifo":
fileBasePath = splitext(filename)[0]
elif filename.endswith(os.sep):
if not isdir(filename):
os.makedirs(filename)
fileBasePath = join(filename, split(filename[:-1])[-1])
elif isdir(filename):
fileBasePath = join(filename, split(filename)[-1])
parentDir = split(fileBasePath)[0]
if not isdir(parentDir):
log.info(f"Creating directory {parentDir}")
os.mkdir(parentDir)
##
if fileBasePath:
fileBasePath = realpath(fileBasePath)
self._filename = fileBasePath
self._resDir = join(dirname(fileBasePath), "res")
self._sourceLang = self._glos.sourceLang
self._targetLang = self._glos.targetLang
if self._sametypesequence:
log.debug(f"Using write option sametypesequence={self._sametypesequence}")
elif self._sametypesequence is not None:
stat = self._glos.collectDefiFormat(100)
log.debug(f"defiFormat stat: {stat}")
if stat:
if stat["m"] > 0.97:
log.info("Auto-selecting sametypesequence=m")
self._sametypesequence = "m"
elif stat["h"] > 0.5:
log.info("Auto-selecting sametypesequence=h")
self._sametypesequence = "h"
def write(self) -> Generator[None, EntryType, None]:
from pyglossary.os_utils import runDictzip
if self._sametypesequence:
if self._merge_syns:
yield from self.writeCompactMergeSyns(self._sametypesequence)
else:
yield from self.writeCompact(self._sametypesequence)
elif self._merge_syns:
yield from self.writeGeneralMergeSyns()
else:
yield from self.writeGeneral()
if self._dictzip:
runDictzip(f"{self._filename}.dict")
syn_file = f"{self._filename}.syn"
if not self._merge_syns and os.path.exists(syn_file):
runDictzip(syn_file)
def fixDefi(self, defi: str, defiFormat: str) -> str:
# for StarDict 3.0:
if self._stardict_client and defiFormat == "h":
defi = self._p_pattern.sub("\\2<br>", defi)
# if there is </p> left without opening, replace with <br>
defi = defi.replace("</p>", "<br>")
defi = self._br_pattern.sub("<br>", defi)
if self._audio_goldendict:
if self._audio_icon:
defi = self._re_audio_link.sub(
r'<audio src="\3">\5</audio>',
defi,
)
else:
defi = self._re_audio_link.sub(
r'<audio src="\3"></audio>',
defi,
)
# FIXME:
# defi = defi.replace(' src="./', ' src="./res/')
return defi
def newIdxList(self) -> T_SdList:
if not self._sqlite:
return MemSdList()
return IdxSqList(join(self._glos.tmpDataDir, "stardict-idx.db"))
def newSynList(self) -> T_SdList:
if not self._sqlite:
return MemSdList()
return SynSqList(join(self._glos.tmpDataDir, "stardict-syn.db"))
def dictMarkToBytesFunc(self) -> tuple[Callable, int]:
if self._large_file:
return uint64ToBytes, 0xFFFFFFFFFFFFFFFF
return uint32ToBytes, 0xFFFFFFFF
def writeCompact(self, defiFormat: str) -> Generator[None, EntryType, None]:
"""
Build StarDict dictionary with sametypesequence option specified.
Every item definition consists of a single article.
All articles have the same format, specified in defiFormat parameter.
defiFormat: format of article definition: h - html, m - plain text
"""
log.debug(f"writeCompact: {defiFormat=}")
dictMark = 0
altIndexList = self.newSynList()
dictFile = open(self._filename + ".dict", "wb")
idxFile = open(self._filename + ".idx", "wb")
dictMarkToBytes, dictMarkMax = self.dictMarkToBytesFunc()
t0 = now()
wordCount = 0
if not isdir(self._resDir):
os.mkdir(self._resDir)
entryIndex = -1
while True:
entry = yield
if entry is None:
break
if entry.isData():
entry.save(self._resDir)
continue
entryIndex += 1
words = entry.l_word # list of strs
word = words[0] # str
defi = self.fixDefi(entry.defi, defiFormat)
# defi is str
for alt in words[1:]:
altIndexList.append((alt.encode("utf-8"), entryIndex))
b_dictBlock = defi.encode("utf-8")
dictFile.write(b_dictBlock)
blockLen = len(b_dictBlock)
b_idxBlock = (
word.encode("utf-8")
+ b"\x00"
+ dictMarkToBytes(dictMark)
+ uint32ToBytes(blockLen)
)
idxFile.write(b_idxBlock)
dictMark += blockLen
wordCount += 1
if dictMark > dictMarkMax:
raise Error(
f"StarDict: {dictMark = } is too big, set option large_file=true",
)
dictFile.close()
idxFile.close()
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
log.info(f"Writing dict file took {now() - t0:.2f} seconds")
self.writeSynFile(altIndexList)
self.writeIfoFile(
wordCount,
len(altIndexList),
defiFormat=defiFormat,
)
def writeGeneral(self) -> Generator[None, EntryType, None]:
"""
Build StarDict dictionary in general case.
Every item definition may consist of an arbitrary number of articles.
sametypesequence option is not used.
"""
log.debug("writeGeneral")
dictMark = 0
altIndexList = self.newSynList()
dictFile = open(self._filename + ".dict", "wb")
idxFile = open(self._filename + ".idx", "wb")
t0 = now()
wordCount = 0
defiFormatCounter: "typing.Counter[str]" = Counter()
if not isdir(self._resDir):
os.mkdir(self._resDir)
dictMarkToBytes, dictMarkMax = self.dictMarkToBytesFunc()
entryIndex = -1
while True:
entry = yield
if entry is None:
break
if entry.isData():
entry.save(self._resDir)
continue
entryIndex += 1
entry.detectDefiFormat() # call no more than once
defiFormat = entry.defiFormat
defiFormatCounter[defiFormat] += 1
if defiFormat not in {"h", "m", "x"}:
log.error(f"invalid {defiFormat=}, using 'm'")
defiFormat = "m"
words = entry.l_word # list of strs
word = words[0] # str
defi = self.fixDefi(entry.defi, defiFormat)
# defi is str
for alt in words[1:]:
altIndexList.append((alt.encode("utf-8"), entryIndex))
b_dictBlock = (defiFormat + defi).encode("utf-8") + b"\x00"
dictFile.write(b_dictBlock)
blockLen = len(b_dictBlock)
b_idxBlock = (
word.encode("utf-8")
+ b"\x00"
+ dictMarkToBytes(dictMark)
+ uint32ToBytes(blockLen)
)
idxFile.write(b_idxBlock)
dictMark += blockLen
wordCount += 1
if dictMark > dictMarkMax:
raise Error(
f"StarDict: {dictMark = } is too big, set option large_file=true",
)
dictFile.close()
idxFile.close()
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
log.info(f"Writing dict file took {now() - t0:.2f} seconds")
log.debug("defiFormatsCount = " + pformat(defiFormatCounter.most_common()))
self.writeSynFile(altIndexList)
self.writeIfoFile(
wordCount,
len(altIndexList),
defiFormat="",
)
def writeSynFile(self, altIndexList: "T_SdList[tuple[bytes, int]]") -> None:
"""Build .syn file."""
if not altIndexList:
return
log.info(f"Sorting {len(altIndexList)} synonyms...")
t0 = now()
altIndexList.sort()
# 28 seconds with old sort key (converted from custom cmp)
# 0.63 seconds with my new sort key
# 0.20 seconds without key function (default sort)
log.info(
f"Sorting {len(altIndexList)} synonyms took {now() - t0:.2f} seconds",
)
log.info(f"Writing {len(altIndexList)} synonyms...")
t0 = now()
with open(self._filename + ".syn", "wb") as synFile:
synFile.write(
b"".join(
b_alt + b"\x00" + uint32ToBytes(entryIndex)
for b_alt, entryIndex in altIndexList
),
)
log.info(
f"Writing {len(altIndexList)} synonyms took {now() - t0:.2f} seconds",
)
def writeCompactMergeSyns(
self,
defiFormat: str,
) -> Generator[None, EntryType, None]:
"""
Build StarDict dictionary with sametypesequence option specified.
Every item definition consists of a single article.
All articles have the same format, specified in defiFormat parameter.
defiFormat - format of article definition: h - html, m - plain text
"""
log.debug(f"writeCompactMergeSyns: {defiFormat=}")
dictMark = 0
idxBlockList = self.newIdxList()
altIndexList = self.newSynList()
dictFile = open(self._filename + ".dict", "wb")
t0 = now()
if not isdir(self._resDir):
os.mkdir(self._resDir)
dictMarkToBytes, dictMarkMax = self.dictMarkToBytesFunc()
entryIndex = -1
while True:
entry = yield
if entry is None:
break
if entry.isData():
entry.save(self._resDir)
continue
entryIndex += 1
words = entry.l_word # list of strs
word = words[0] # str
defi = self.fixDefi(entry.defi, defiFormat)
# defi is str
b_dictBlock = defi.encode("utf-8")
dictFile.write(b_dictBlock)
blockLen = len(b_dictBlock)
blockData = dictMarkToBytes(dictMark) + uint32ToBytes(blockLen)
for word in words:
idxBlockList.append((word.encode("utf-8"), blockData))
dictMark += blockLen
if dictMark > dictMarkMax:
raise Error(
f"StarDict: {dictMark = } is too big, set option large_file=true",
)
wordCount = self.writeIdxFile(idxBlockList)
dictFile.close()
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
log.info(f"Writing dict file took {now() - t0:.2f} seconds")
self.writeIfoFile(
wordCount,
len(altIndexList),
defiFormat=defiFormat,
)
def writeGeneralMergeSyns(self) -> Generator[None, EntryType, None]:
"""
Build StarDict dictionary in general case.
Every item definition may consist of an arbitrary number of articles.
sametypesequence option is not used.
"""
log.debug("writeGeneralMergeSyns")
dictMark = 0
idxBlockList = self.newIdxList()
altIndexList = self.newSynList()
dictFile = open(self._filename + ".dict", "wb")
t0 = now()
wordCount = 0
defiFormatCounter: "typing.Counter[str]" = Counter()
if not isdir(self._resDir):
os.mkdir(self._resDir)
dictMarkToBytes, dictMarkMax = self.dictMarkToBytesFunc()
entryIndex = -1
while True:
entry = yield
if entry is None:
break
if entry.isData():
entry.save(self._resDir)
continue
entryIndex += 1
entry.detectDefiFormat() # call no more than once
defiFormat = entry.defiFormat
defiFormatCounter[defiFormat] += 1
if defiFormat not in {"h", "m", "x"}:
log.error(f"invalid {defiFormat=}, using 'm'")
defiFormat = "m"
words = entry.l_word # list of strs
word = words[0] # str
defi = self.fixDefi(entry.defi, defiFormat)
# defi is str
b_dictBlock = (defiFormat + defi).encode("utf-8") + b"\x00"
dictFile.write(b_dictBlock)
blockLen = len(b_dictBlock)
blockData = dictMarkToBytes(dictMark) + uint32ToBytes(blockLen)
for word in words:
idxBlockList.append((word.encode("utf-8"), blockData))
dictMark += blockLen
if dictMark > dictMarkMax:
raise Error(
f"StarDict: {dictMark = } is too big, set option large_file=true",
)
wordCount = self.writeIdxFile(idxBlockList)
dictFile.close()
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
log.info(f"Writing dict file took {now() - t0:.2f} seconds")
log.debug("defiFormatsCount = " + pformat(defiFormatCounter.most_common()))
self.writeIfoFile(
wordCount,
len(altIndexList),
defiFormat="",
)
def writeIdxFile(self, indexList: "T_SdList[tuple[bytes, bytes]]") -> int:
filename = self._filename + ".idx"
if not indexList:
return 0
log.info(f"Sorting {len(indexList)} items...")
t0 = now()
indexList.sort()
log.info(
f"Sorting {len(indexList)} {filename} took {now() - t0:.2f} seconds",
)
log.info(f"Writing {len(indexList)} index entries...")
t0 = now()
with open(filename, mode="wb") as indexFile:
indexFile.write(b"".join(key + b"\x00" + value for key, value in indexList))
log.info(
f"Writing {len(indexList)} {filename} took {now() - t0:.2f} seconds",
)
return len(indexList)
def writeIfoFile(
self,
wordCount: int,
synWordCount: int,
defiFormat: str = "",
) -> None:
"""Build .ifo file."""
glos = self._glos
bookname = newlinesToSpace(glos.getInfo("name"))
indexFileSize = getsize(self._filename + ".idx")
sourceLang = self._sourceLang
targetLang = self._targetLang
if sourceLang and targetLang:
langs = f"{sourceLang.code}-{targetLang.code}"
if langs not in bookname.lower():
bookname = f"{bookname} ({langs})"
log.info(f"bookname: {bookname}")
ifo: "list[tuple[str, str]]" = [
("version", "3.0.0"),
("bookname", bookname),
("wordcount", str(wordCount)),
("idxfilesize", str(indexFileSize)),
]
if self._large_file:
ifo.append(("idxoffsetbits", "64"))
if defiFormat:
ifo.append(("sametypesequence", defiFormat))
if synWordCount > 0:
ifo.append(("synwordcount", str(synWordCount)))
desc = glos.getInfo("description")
_copyright = glos.getInfo("copyright")
if _copyright:
desc = f"{_copyright}\n{desc}"
publisher = glos.getInfo("publisher")
if publisher:
desc = f"Publisher: {publisher}\n{desc}"
for key in infoKeys:
if key in {
"bookname",
"description",
}:
continue
value = glos.getInfo(key)
if not value:
continue
value = newlinesToSpace(value)
ifo.append((key, value))
ifo.append(("description", newlinesToBr(desc)))
ifoStr = "StarDict's dict ifo file\n"
for key, value in ifo:
ifoStr += f"{key}={value}\n"
with open(
self._filename + ".ifo",
mode="w",
encoding="utf-8",
newline="\n",
) as ifoFile:
ifoFile.write(ifoStr)
# File: ilius_pyglossary/pyglossary/plugins/edlin.py
# -*- coding: utf-8 -*-
# edlin.py
#
# Copyright © 2016-2019 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# This file is part of PyGlossary project, https://github.com/ilius/pyglossary
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
import os
from collections.abc import Generator, Iterator
from os.path import dirname, isdir, isfile, join
from typing import TYPE_CHECKING
from pyglossary.core import log
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
from pyglossary.text_utils import (
escapeNTB,
splitByBarUnescapeNTB,
unescapeNTB,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "edlin"
format = "Edlin"
# Editable Linked List of Entries
description = "EDLIN"
extensions = (".edlin",)
extensionCreate = ".edlin/"
singleFile = False
kind = "directory"
wiki = ""
website = None
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"prev_link": BoolOption(comment="Enable link to previous entry"),
}
def makeDir(direc: str) -> None:
if not isdir(direc):
os.makedirs(direc)
class Reader:
_encoding: str = "utf-8"
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def close(self) -> None:
self._clear()
def _clear(self) -> None:
self._filename = ""
self._prev_link = True
self._wordCount = None
self._rootPath = None
self._resDir = ""
self._resFileNames: list[str] = []
def open(self, filename: str) -> None:
from pyglossary.json_utils import jsonToOrderedData
if isdir(filename):
infoFname = join(filename, "info.json")
elif isfile(filename):
infoFname = filename
filename = dirname(filename)
else:
raise ValueError(
f"error while opening {filename!r}: no such file or directory",
)
self._filename = filename
with open(infoFname, encoding=self._encoding) as infoFp:
info = jsonToOrderedData(infoFp.read())
self._wordCount = info.pop("wordCount")
self._prev_link = info.pop("prev_link")
self._rootPath = info.pop("root")
for key, value in info.items():
self._glos.setInfo(key, value)
self._resDir = join(filename, "res")
if isdir(self._resDir):
self._resFileNames = os.listdir(self._resDir)
else:
self._resDir = ""
self._resFileNames = []
def __len__(self) -> int:
if self._wordCount is None:
log.error("called len() on a reader which is not open")
return 0
return self._wordCount + len(self._resFileNames)
def __iter__(self) -> Iterator[EntryType]:
if not self._rootPath:
raise RuntimeError("iterating over a reader while it's not open")
wordCount = 0
nextPath = self._rootPath
while nextPath != "END":
wordCount += 1
# before or after reading word and defi
# (and skipping empty entry)? FIXME
with open(
join(self._filename, nextPath),
encoding=self._encoding,
) as _file:
header = _file.readline().rstrip()
if self._prev_link:
_prevPath, nextPath = header.split(" ")
else:
nextPath = header
word = _file.readline()
if not word:
yield None # update progressbar
continue
defi = _file.read()
if not defi:
log.warning(
f"Edlin Reader: no definition for word {word!r}, skipping",
)
yield None # update progressbar
continue
word = word.rstrip()
defi = defi.rstrip()
if self._glos.alts:
word = splitByBarUnescapeNTB(word)
if len(word) == 1:
word = word[0]
else:
word = unescapeNTB(word, bar=False)
# defi = unescapeNTB(defi)
yield self._glos.newEntry(word, defi)
if wordCount != self._wordCount:
log.warning(
f"{wordCount} words found, "
f"wordCount in info.json was {self._wordCount}",
)
self._wordCount = wordCount
resDir = self._resDir
for fname in self._resFileNames:
with open(join(resDir, fname), "rb") as _file:
yield self._glos.newDataEntry(
fname,
_file.read(),
)
class Writer:
_encoding: str = "utf-8"
_prev_link: bool = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def finish(self) -> None:
self._clear()
def open(self, filename: str) -> None:
self._filename = filename
self._resDir = join(filename, "res")
os.makedirs(filename)
os.mkdir(self._resDir)
def _clear(self) -> None:
self._filename = ""
self._resDir = ""
self._encoding = "utf-8"
self._hashSet: set[str] = set()
# self._wordCount = None
@staticmethod
def hashToPath(h: str) -> str:
return h[:2] + "/" + h[2:]
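# e.g. hashToPath("1a2b3c4d") -> "1a/2b3c4d"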
def getEntryHash(self, entry: EntryType) -> str:
"""
Return a hash string for the given entry.
Do not call it twice for one entry; if you do, you will get a
different hash string.
"""
from hashlib import sha1
_hash = sha1(entry.s_word.encode("utf-8")).hexdigest()[:8] # noqa: S324
if _hash not in self._hashSet:
self._hashSet.add(_hash)
return _hash
index = 0
while True:
tmp_hash = _hash + f"{index:x}"
if tmp_hash not in self._hashSet:
self._hashSet.add(tmp_hash)
return tmp_hash
index += 1
def saveEntry(
self,
thisEntry: EntryType,
thisHash: str,
prevHash: "str | None",
nextHash: "str | None",
) -> None:
dpath = join(self._filename, thisHash[:2])
makeDir(dpath)
with open(
join(dpath, thisHash[2:]),
"w",
encoding=self._encoding,
) as toFile:
nextPath = self.hashToPath(nextHash) if nextHash else "END"
if self._prev_link:
prevPath = self.hashToPath(prevHash) if prevHash else "START"
header = prevPath + " " + nextPath
else:
header = nextPath
toFile.write(
"\n".join(
[
header,
escapeNTB(thisEntry.s_word, bar=False),
thisEntry.defi,
],
),
)
def write(self) -> Generator[None, EntryType, None]:
from collections import OrderedDict as odict
from pyglossary.json_utils import dataToPrettyJson
thisEntry = yield
if thisEntry is None:
raise ValueError("glossary is empty")
count = 1
rootHash = thisHash = self.getEntryHash(thisEntry)
prevHash = None
while True:
nextEntry = yield
if nextEntry is None:
break
if nextEntry.isData():
nextEntry.save(self._resDir)
continue
nextHash = self.getEntryHash(nextEntry)
self.saveEntry(thisEntry, thisHash, prevHash, nextHash)
thisEntry = nextEntry
prevHash, thisHash = thisHash, nextHash
count += 1
self.saveEntry(thisEntry, thisHash, prevHash, None)
with open(
join(self._filename, "info.json"),
"w",
encoding=self._encoding,
) as toFile:
info = odict()
info["name"] = self._glos.getInfo("name")
info["root"] = self.hashToPath(rootHash)
info["prev_link"] = self._prev_link
info["wordCount"] = count
# info["modified"] =
for key, value in self._glos.getExtraInfos(
(
"name",
"root",
"prev_link",
"wordCount",
),
).items():
info[key] = value
toFile.write(dataToPrettyJson(info))
# File: ilius_pyglossary/pyglossary/plugins/jmdict.py
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
import re
import unicodedata
from io import BytesIO
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import io
from collections.abc import Callable, Iterator
from pyglossary.glossary_types import (
EntryType,
GlossaryType,
)
from pyglossary.lxml_types import Element, T_htmlfile
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import exc_note, pip
from pyglossary.io_utils import nullBinaryIO
from pyglossary.option import (
BoolOption,
IntOption,
Option,
StrOption,
)
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "jmdict"
format = "JMDict"
description = "JMDict (xml)"
extensions = ()
extensionCreate = ""
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/JMdict"
website = (
"https://www.edrdg.org/jmdict/j_jmdict.html",
"The JMDict Project",
)
optionsProp: "dict[str, Option]" = {
"example_color": StrOption(
comment="Examples color",
),
"example_padding": IntOption(
comment="Padding for examples (in px)",
),
"translitation": BoolOption(
comment="Add translitation (romaji) of keywords",
),
}
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
_example_padding: int = 10
_example_color: str = ""
# _example_color: str = "#008FE1"
_translitation: bool = False
tagStyle = (
"color:white;"
"background:green;"
"padding-left:3px;"
"padding-right:3px;"
"border-radius:0.5ex;"
# 0.5ex ~= 0.3em, but "ex" is recommended
)
gikun_key = "gikun (meaning as reading) or jukujikun (special kanji reading)"
re_inf_mapping = {
gikun_key: "gikun/jukujikun",
"out-dated or obsolete kana usage": "obsolete", # outdated/obsolete
"word containing irregular kana usage": "irregular",
}
@staticmethod
def makeList(
hf: "T_htmlfile",
input_objects: list[Element],
processor: Callable,
single_prefix: str = "",
skip_single: bool = True,
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not input_objects:
return
if skip_single and len(input_objects) == 1:
hf.write(single_prefix)
processor(hf, input_objects[0])
return
with hf.element("ol"):
for el in input_objects:
with hf.element("li"):
processor(hf, el)
# TODO: break it down
# PLR0912 Too many branches (23 > 12)
def writeSense( # noqa: PLR0912
self,
hf: "T_htmlfile",
sense: Element,
) -> None:
from lxml import etree as ET
def br() -> Element:
return ET.Element("br")
for elem in sense.findall("pos"):
if not elem.text:
continue
desc = elem.text
if desc == "unclassified":
continue
with hf.element("i"):
hf.write(f"{desc.capitalize()}")
hf.write(br())
glossList = [elem.text.strip() for elem in sense.findall("gloss") if elem.text]
if glossList:
for i, gloss in enumerate(glossList):
if i > 0:
hf.write(", ")
hf.write(gloss)
hf.write(br())
relatedWords = []
for elem in sense.findall("xref"):
if not elem.text:
continue
word = elem.text.strip()
word = self._link_number_postfix.sub("", word)
relatedWords.append(word)
if relatedWords:
hf.write("Related: ")
for i, word in enumerate(relatedWords):
if i > 0:
with hf.element("big"):
hf.write(" | ")
with hf.element("a", href=f"bword://{word}"):
hf.write(word)
hf.write(br())
antonymWords = []
for elem in sense.findall("ant"):
if not elem.text:
continue
word = elem.text.strip()
word = self._link_number_postfix.sub("", word)
antonymWords.append(word)
if antonymWords:
hf.write("Antonym: ")
for i, word in enumerate(antonymWords):
if i > 0:
with hf.element("big"):
hf.write(" | ")
with hf.element(
"a",
href=f"bword://{word}",
attrib={"class": "antonym"},
):
hf.write(word)
hf.write(br())
for i, elem in enumerate(sense.findall("field")):
if not elem.text:
continue
if i > 0:
hf.write(" ")
desc = elem.text
with hf.element("span", style=self.tagStyle):
hf.write(desc)
hf.write(br())
for i, elem in enumerate(sense.findall("misc")):
if not elem.text:
continue
if i > 0:
hf.write(" ")
desc = elem.text
with hf.element("small"):
with hf.element("span", style=self.tagStyle):
hf.write(desc)
hf.write(br())
examples = sense.findall("example")
# TODO: move to a method
if examples: # noqa: PLR1702
with hf.element(
"div",
attrib={
"class": "example",
"style": f"padding: {self._example_padding}px 0px;",
},
):
hf.write("Examples:")
with hf.element("ul"):
for i, elem in enumerate(examples):
if not elem.text:
continue
if i > 0:
hf.write(" ")
# one ex_srce (id?), one ex_text, and two ex_sent tags
textElem = elem.find("ex_text")
if textElem is None:
continue
if not textElem.text:
continue
text = textElem.text
sentList: list[str] = []
for sentElem in elem.findall("ex_sent"):
if not sentElem.text:
continue
sentList.append(sentElem.text)
with hf.element("li"):
style = {}
if self._example_color:
style["color"] = self._example_color
with hf.element("font", attrib=style):
hf.write(text)
for sent in sentList:
hf.write(br())
hf.write(sent)
# TODO: break it down
def getEntryByElem( # noqa: PLR0912
self,
entry: Element,
) -> EntryType:
from lxml import etree as ET
glos = self._glos
keywords = []
f = BytesIO()
translit = self._translitation
def br() -> Element:
return ET.Element("br")
with ET.htmlfile(f, encoding="utf-8") as hf: # noqa: PLR1702
kebList: list[str] = []
rebList: list[str] = []
kebDisplayList: list[str] = []
rebDisplayList: "list[tuple[str, list[str]]]" = []
with hf.element("div"):
for k_ele in entry.findall("k_ele"):
keb = k_ele.find("keb")
if keb is None:
continue
if not keb.text:
continue
keb_text = keb.text
keb_text_norm = unicodedata.normalize("NFKC", keb_text)
keywords.append(keb_text_norm)
if keb_text != keb_text_norm:
keywords.append(keb_text)
kebList.append(keb_text)
keb_display = keb_text
if translit:
import romkan # type: ignore
t_keb = romkan.to_roma(keb_text)
if t_keb and t_keb.isascii():
keywords.append(t_keb)
keb_display += f" ({t_keb})"
kebDisplayList.append(keb_display)
# for elem in k_ele.findall("ke_pri"):
# log.info(elem.text)
for r_ele in entry.findall("r_ele"):
reb = r_ele.find("reb")
if reb is None:
continue
if not reb.text:
continue
props = []
if r_ele.find("re_nokanji") is not None:
props.append("no kanji")
inf = r_ele.find("re_inf")
if inf is not None and inf.text:
props.append(
self.re_inf_mapping.get(inf.text, inf.text),
)
keywords.append(reb.text)
reb_text = reb.text
rebList.append(reb_text)
reb_display = reb_text
if translit:
import romkan
t_reb = romkan.to_roma(reb.text)
if t_reb and t_reb.isascii():
keywords.append(t_reb)
reb_display += f" ({t_reb})"
rebDisplayList.append((reb_display, props))
# for elem in r_ele.findall("re_pri"):
# log.info(elem.text)
# this is for making internal links valid
# this makes too many alternates!
# but we don't seem to have a choice
# except for scanning and indexing all words once
# and then starting over and fixing/optimizing links
for s_keb in kebList:
for s_reb in rebList:
keywords.append(f"{s_keb}・{s_reb}") # noqa: PERF401
if kebDisplayList:
with hf.element(glos.titleTag(kebDisplayList[0])):
for i, s_keb in enumerate(kebDisplayList):
if i > 0:
with hf.element("font", color="red"):
hf.write(" | ")
hf.write(s_keb)
hf.write(br())
if rebDisplayList:
for i, (s_reb, props) in enumerate(rebDisplayList):
if i > 0:
with hf.element("font", color="red"):
hf.write(" | ")
with hf.element("font", color="green"):
hf.write(s_reb)
for prop in props:
hf.write(" ")
with hf.element("small"):
with hf.element("span", style=self.tagStyle):
hf.write(prop)
hf.write(br())
_hf = cast("T_htmlfile", hf)
self.makeList(
_hf,
entry.findall("sense"),
self.writeSense,
)
defi = f.getvalue().decode("utf-8")
_file = self._file
byteProgress = (_file.tell(), self._fileSize)
return self._glos.newEntry(
keywords,
defi,
defiFormat="h",
byteProgress=byteProgress,
)
@staticmethod
def tostring(elem: Element) -> str:
from lxml import etree as ET
return (
ET.tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
def setCreationTime(self, header: str) -> None:
m = re.search("JMdict created: ([0-9]{4}-[0-9]{2}-[0-9]{2})", header)
if m is None:
return
self._glos.setInfo("creationTime", m.group(1))
def setMetadata(self, header: str) -> None:
# TODO: self.set_info("edition", ...)
self.setCreationTime(header)
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._wordCount = 0
self._filename = ""
self._file: "io.IOBase" = nullBinaryIO
self._fileSize = 0
self._link_number_postfix = re.compile("・[0-9]+$")
def __len__(self) -> int:
return self._wordCount
def close(self) -> None:
if self._file:
self._file.close()
self._file = nullBinaryIO
def open(
self,
filename: str,
) -> None:
try:
from lxml import etree as ET # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
self._filename = filename
self._fileSize = os.path.getsize(filename)
self._glos.sourceLangName = "Japanese"
self._glos.setDefaultDefiFormat("h")
self._glos.setInfo("definition_has_headwords", "True")
self._glos.setInfo("entry_url", "https://jisho.org/search/{word}")
# also good: f"https://sakuradict.com/search?q={{word}}"
header = ""
with compressionOpen(filename, mode="rt", encoding="utf-8") as text_file:
text_file = cast("io.TextIOBase", text_file)
for line in text_file:
if "<JMdict>" in line:
break
header += line
self.setMetadata(header)
self._file = compressionOpen(filename, mode="rb")
def __iter__(self) -> Iterator[EntryType]:
from lxml import etree as ET
context = ET.iterparse( # type: ignore # noqa: PGH003
self._file,
events=("end",),
tag="entry",
)
for _, _elem in context:
elem = cast("Element", _elem)
yield self.getEntryByElem(elem)
# clean up preceding siblings to save memory
# this reduces memory usage from ~64 MB to ~30 MB
parent = elem.getparent()
if parent is None:
continue
while elem.getprevious() is not None:
del parent[0]
# File: ilius_pyglossary/pyglossary/plugins/dictunformat.py
from __future__ import annotations
from pyglossary.core import log
from pyglossary.option import EncodingOption, Option, StrOption
from pyglossary.text_reader import TextGlossaryReader
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dictunformat"
format = "Dictunformat"
description = "dictunformat output file"
extensions = (".dictunformat",)
extensionCreate = ".dictunformat"
singleFile = True
kind = "text"
wiki = "https://directory.fsf.org/wiki/Dictd"
website = (
"https://github.com/cheusov/dictd/blob/master/dictunformat.1.in",
"dictd/dictunformat.1.in - @cheusov/dictd",
)
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"headword_separator": StrOption(
comment="separator for headword and alternates",
),
}
def unescapeDefi(defi: str) -> str:
return defi
class Reader(TextGlossaryReader):
_headword_separator = "; "
# https://github.com/cheusov/dictd/blob/master/dictfmt/dictunformat.in#L14
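# A sketch of the input layout that nextBlock() below expects (content is
# hypothetical): entries are separated by lines made up of underscores,
# the first non-empty line after a separator is the headword (alternates
# joined by "; "), and the remaining lines (minus a repeated headword
# line) form the definition.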
@classmethod
def isInfoWord(cls, word: str) -> bool:
return word.startswith("00-database-")
@classmethod
def fixInfoWord(cls, word: str) -> str:
return word
def setInfo(self, word: str, defi: str) -> None:
if word == "00-database-short":
self._glos.setInfo("name", defi)
return
if word != "00-database-info":
return
glos = self._glos
lastKey = ""
for line in defi.split("\n"):
if not line.startswith("##:"):
if lastKey:
glos.setInfo(lastKey, f"{glos.getInfo(lastKey)}\n{line}")
continue
parts = line[3:].split(":")
if len(parts) < 2:
log.error(f"unexpected line: {line}")
key = lastKey = parts[0]
value = ":".join(parts[1:])
glos.setInfo(key, value)
def nextBlock(self) -> tuple[str | list[str], str, None] | None:
if not self._file:
raise StopIteration
word = ""
defiLines: list[str] = []
while True:
line = self.readline()
if not line:
break
line = line.rstrip("\n\r")
if not line:
continue
if not line.strip("_"):
if not word:
continue
if not defiLines:
log.warning(f"no definition/value for {word!r}")
defi = unescapeDefi("\n".join(defiLines))
words = word.split(self._headword_separator)
return words, defi, None
if not word:
word = line
continue
if line == word:
continue
if line.lower() == word:
word = line
continue
defiLines.append(line)
if word:
defi = unescapeDefi("\n".join(defiLines))
if word.startswith("00-database-") and defi == "unknown":
log.info(f"ignoring {word} -> {defi}")
return None
words = word.split(self._headword_separator)
return words, defi, None
raise StopIteration
# File: ilius_pyglossary/pyglossary/plugins/abc_medical_notes.py
# -*- coding: utf-8 -*-
from __future__ import annotations
import html
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import sqlite3
from collections.abc import Iterator
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "abc_medical_notes"
format = "ABCMedicalNotes"
description = "ABC Medical Notes (SQLite3)"
extensions = ()
extensionCreate = ".db"
singleFile = True
kind = "binary"
wiki = ""
_url = "https://play.google.com/store/apps/details?id=com.pocketmednotes2014.secondapp"
website = (
_url,
"ABC Medical Notes 2021 - Google Play",
)
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
self._con = connect(filename)
self._cur = self._con.cursor()
self._glos.setDefaultDefiFormat("h")
def __len__(self) -> int:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute("select count(*) from NEW_TABLE")
return self._cur.fetchone()[0]
def __iter__(self) -> Iterator[EntryType]:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute(
"select _id, contents from NEW_TABLE where _id is not null",
)
# FIXME: iteration over self._cur stops after one entry
# and self._cur.fetchone() returns None
# for row in self._cur:
for row in self._cur.fetchall():
word = html.unescape(row[0])
definition = row[1].decode("utf-8", errors="ignore")
# print(f"{word!r}, {definition!r}")
yield self._glos.newEntry(word, definition, defiFormat="h")
def close(self) -> None:
if self._cur:
self._cur.close()
if self._con:
self._con.close()
self._clear()
# File: ilius_pyglossary/pyglossary/plugins/lingoes_ldf.py
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Generator
from typing import TYPE_CHECKING
from pyglossary.compression import (
# compressionOpen,
stdCompressions,
)
from pyglossary.core import log
from pyglossary.file_utils import fileCountLines
from pyglossary.option import (
BoolOption,
EncodingOption,
NewlineOption,
Option,
)
from pyglossary.text_reader import TextGlossaryReader, nextBlockResultType
from pyglossary.text_utils import splitByBar
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "lingoes_ldf"
format = "LingoesLDF"
description = "Lingoes Source (.ldf)"
extensions = (".ldf",)
extensionCreate = ".ldf"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/Lingoes"
website = (
"http://www.lingoes.net/en/dictionary/dict_format.php",
"Lingoes.net",
)
optionsProp: "dict[str, Option]" = {
"newline": NewlineOption(),
"resources": BoolOption(comment="Enable resources / data files"),
"encoding": EncodingOption(),
}
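# A sketch of the .ldf layout handled below (content is hypothetical):
# "###Key: value" lines carry metadata; each entry is a headword line
# (alternates separated by "|") followed by one or more definition lines,
# and a blank line terminates the entry:
#   ###Title: My Glossary
#
#   apple|apples
#   a round fruit<br/>grown on trees
#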
class Reader(TextGlossaryReader):
compressions = stdCompressions
def __len__(self) -> int:
if self._wordCount is None:
log.debug("Try not to use len(reader) as it takes extra time")
self._wordCount = (
fileCountLines(
self._filename,
newline=b"\n\n",
)
- self._leadingLinesCount
)
return self._wordCount
@classmethod
def isInfoWord(cls, word: str) -> bool:
if isinstance(word, str):
return word.startswith("#")
return False
@classmethod
def fixInfoWord(cls, word: str) -> str:
if isinstance(word, str):
return word.lstrip("#").lower()
return word
def nextBlock(self) -> nextBlockResultType:
if not self._file:
raise StopIteration
entryLines = []
while True:
line = self.readline()
if not line:
raise StopIteration
line = line.rstrip("\n\r") # FIXME
if line.startswith("###"):
parts = line.split(":")
key = parts[0].strip()
value = ":".join(parts[1:]).strip()
return key, value, None
if line:
entryLines.append(line)
continue
# now `line` is empty, process `entryLines`
if not entryLines:
return None
if len(entryLines) < 2:
log.error(
f"invalid block near pos {self._file.tell()}"
f" in file {self._filename}",
)
return None
word = entryLines[0]
defi = "\n".join(entryLines[1:])
defi = defi.replace("<br/>", "\n") # FIXME
words = splitByBar(word)
return words, defi, None
class Writer:
compressions = stdCompressions
_newline: str = "\n"
_resources: bool = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
def getInfo(self, key: str) -> str:
return self._glos.getInfo(key).replace("\n", "<br>")
def getAuthor(self) -> str:
return self._glos.author.replace("\n", "<br>")
def finish(self) -> None:
self._filename = ""
def open(self, filename: str) -> None:
self._filename = filename
@staticmethod
def _defiEscapeFunc(defi: str) -> str:
return defi.replace("\n", "<br/>")
def write(self) -> Generator[None, EntryType, None]:
from pyglossary.text_writer import writeTxt
newline = self._newline
resources = self._resources
head = (
f"###Title: {self.getInfo('title')}\n"
f"###Description: {self.getInfo('description')}\n"
f"###Author: {self.getAuthor()}\n"
f"###Email: {self.getInfo('email')}\n"
f"###Website: {self.getInfo('website')}\n"
f"###Copyright: {self.getInfo('copyright')}\n"
)
yield from writeTxt(
self._glos,
entryFmt="{word}\n{defi}\n\n",
filename=self._filename,
writeInfo=False,
defiEscapeFunc=self._defiEscapeFunc,
ext=".ldf",
head=head,
newline=newline,
resources=resources,
)
# File: ilius_pyglossary/pyglossary/plugins/freedict.py
# -*- coding: utf-8 -*-
from __future__ import annotations
import re
from io import BytesIO, IOBase
from os.path import dirname, isfile, join
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from collections.abc import Callable, Iterator
from typing import Any
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element, T_htmlfile
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import exc_note, log, pip
from pyglossary.html_utils import unescape_unicode
from pyglossary.io_utils import nullBinaryIO
from pyglossary.langs import langDict
from pyglossary.langs.writing_system import getWritingSystemFromText
from pyglossary.option import (
BoolOption,
IntOption,
Option,
StrOption,
)
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "freedict"
format = "FreeDict"
description = "FreeDict (.tei)"
extensions = (".tei",)
extensionCreate = ".tei"
singleFile = True
kind = "text"
wiki = "https://github.com/freedict/fd-dictionaries/wiki"
website = (
"https://freedict.org/",
"FreeDict.org",
)
optionsProp: "dict[str, Option]" = {
"resources": BoolOption(
comment="Enable resources / data files",
),
"discover": BoolOption(
comment="Find and show unsupported tags",
),
"auto_rtl": BoolOption(
allowNone=True,
comment="Auto-detect and mark Right-to-Left text",
),
"auto_comma": BoolOption(
comment="Auto-detect comma sign based on text",
),
"comma": StrOption(
customValue=True,
values=[", ", "، "],
comment="Comma sign (following space) to use as separator",
),
"word_title": BoolOption(
comment="Add headwords title to beginning of definition",
),
"pron_color": StrOption(
comment="Pronunciation color",
),
"gram_color": StrOption(
comment="Grammar color",
),
"example_padding": IntOption(
comment="Padding for examples (in px)",
),
}
tei = "{http://www.tei-c.org/ns/1.0}"
ENTRY = f"{tei}entry"
INCLUDE = "{http://www.w3.org/2001/XInclude}include"
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
_discover: bool = False
_auto_rtl: "bool | None" = None
_auto_comma: bool = True
_comma: str = ", "
_word_title: bool = False
_pron_color: str = "gray"
_gram_color: str = "green"
_example_padding: int = 10
ns = {
None: "http://www.tei-c.org/ns/1.0",
}
xmlLang = "{http://www.w3.org/XML/1998/namespace}lang"
supportedTags = {
f"{tei}{tag}"
for tag in (
"entry",
"form", # entry.form
"orth", # entry.form.orth
"pron", # entry.form.pron
"sense", # entry.sense
"cit", # entry.sense.cit
"quote", # entry.sense.cit.quote
"gramGrp", # entry.sense.cit.gramGrp
"pos", # entry.sense.cit.gramGrp.pos
"gen", # entry.sense.cit.gramGrp.gen
"number", # entry.sense.cit.gramGrp.number
"num", # entry.sense.cit.gramGrp.num
)
}
posMapping = {
"n": "noun",
"v": "verb",
"pn": "pronoun",
"pron": "pronoun",
"prep": "preposition",
"conj": "conjunction",
"adj": "adjective",
"adv": "adverb",
# "numeral", "interjection", "suffix", "particle"
# "indefinitePronoun"
}
genderMapping = {
"m": "male",
"masc": "male",
"f": "female",
"fem": "female",
"n": "neutral",
"neut": "neutral",
# "m;f"
"adj": "adjective",
}
numberMapping = {
"pl": "plural",
"sing": "singular",
}
subcMapping = {
"t": "transitive",
"i": "intransitive",
}
gramClass = "grammar"
@staticmethod
def makeList( # noqa: PLR0913
hf: "T_htmlfile",
input_objects: list[Any],
processor: Callable,
single_prefix: str = "",
skip_single: bool = True,
ordered: bool = True,
list_type: str = "",
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not input_objects:
return
if skip_single and len(input_objects) == 1:
if single_prefix:
hf.write(single_prefix)
processor(hf, input_objects[0])
return
attrib: "dict[str, str]" = {}
if list_type:
attrib["type"] = list_type
with hf.element("ol" if ordered else "ul", attrib=attrib):
for el in input_objects:
with hf.element("li"):
processor(hf, el)
@staticmethod
def getTitleTag(sample: str) -> str:
ws = getWritingSystemFromText(sample)
if ws:
return ws.titleTag
return "b"
def writeRef( # noqa: PLR6301
self,
hf: "T_htmlfile",
ref: Element,
) -> None:
target = ref.get("target")
attrib: "dict[str, str]" = {}
if target:
if "://" in target:
attrib["class"] = "external"
else:
target = f"bword://{ref.text}"
with hf.element("a", href=target, attrib=attrib):
hf.write(ref.text or "")
def writeQuote(
self,
hf: "T_htmlfile",
elem: Element,
) -> None:
self.writeWithDirection(hf, elem, "div")
def writeTransCit(
self,
hf: "T_htmlfile",
elem: Element,
) -> None:
from lxml import etree as ET
quotes = []
sense = ET.Element(f"{tei}sense")
for child in elem.xpath("child::node()"):
if isinstance(child, str):
child = child.strip() # noqa: PLW2901
if child:
hf.write(child)
log.warning(f"text directly inside <cit>: {child}")
continue
if child.__class__.__name__ == "_Comment":
continue
if child.tag == f"{tei}quote":
quotes.append(child)
continue
if child.tag in {f"{tei}gramGrp", f"{tei}usg", f"{tei}note"}:
sense.append(child)
continue
if child.tag == f"{tei}cit":
# TODO
continue
log.warning(
f"unknown tag {child.tag!r} inside translation <cit>"
f": {self.tostring(child)}",
)
self.makeList(
hf,
quotes,
self.writeQuote,
single_prefix="",
)
if next(sense.iterchildren(), False) is not None:
self.writeSense(hf, sense)
def writeDef(
self,
hf: "T_htmlfile",
elem: Element,
) -> None:
# sep = None
# if self._cif_newline:
# sep = ET.Element("br")
count = 0
def writeChild(item: "str | Element", depth: int) -> None:
nonlocal count
if isinstance(item, str):
item = item.strip()
if not item:
return
if count > 0:
hf.write(self.getCommaSep(item))
# with hf.element(self.getTitleTag(item)):
hf.write(item)
return
if item.tag == f"{tei}ref":
if count > 0:
hf.write(self.getCommaSep(item.text))
self.writeRef(hf, item)
return
for child in item.xpath("child::node()"):
writeChild(child, depth + 1)
if depth < 1:
count += 1
for child in elem.xpath("child::node()"):
writeChild(child, 0)
def writeWithDirection(
self,
hf: "T_htmlfile",
child: Element,
tag: str,
) -> None:
attrib = dict(child.attrib)
try:
lang = attrib.pop(self.xmlLang)
except KeyError:
pass
else:
attrib["lang"] = lang
if self._auto_rtl:
langObj = langDict[lang]
if langObj:
if langObj.rtl:
attrib["dir"] = "rtl"
else:
attrib["dir"] = "ltr"
try:
_type = attrib.pop("type")
except KeyError:
pass
else:
if _type != "trans":
attrib["class"] = _type
with hf.element(tag, attrib=attrib):
self.writeRichText(hf, child)
def writeRichText(
self,
hf: "T_htmlfile",
el: Element,
) -> None:
from lxml import etree as ET
for child in el.xpath("child::node()"):
if isinstance(child, str):
hf.write(child)
continue
if child.tag == f"{tei}ref":
self.writeRef(hf, child)
continue
if child.tag == f"{tei}br":
hf.write(ET.Element("br"))
continue
if child.tag == f"{tei}p":
with hf.element("p", **child.attrib):
self.writeRichText(hf, child)
continue
if child.tag == f"{tei}div":
self.writeWithDirection(hf, child, "div")
continue
if child.tag == f"{tei}span":
self.writeWithDirection(hf, child, "span")
continue
self.writeRichText(hf, child)
def getLangDesc(self, elem: Element) -> str | None:
lang = elem.attrib.get(self.xmlLang)
if lang:
langObj = langDict[lang]
if not langObj:
log.warning(f"unknown lang {lang!r} in {self.tostring(elem)}")
return None
return langObj.name
orig = elem.attrib.get("orig")
if orig:
return orig
log.warning(f"unknown lang name in {self.tostring(elem)}")
return None
def writeLangTag(
self,
hf: "T_htmlfile",
elem: Element,
) -> None:
langDesc = self.getLangDesc(elem)
if not langDesc:
return
# TODO: make it Italic or change font color?
if elem.text:
hf.write(f"{langDesc}: {elem.text}")
else:
hf.write(f"{langDesc}")
def writeNote(
self,
hf: "T_htmlfile",
note: Element,
) -> None:
self.writeRichText(hf, note)
# TODO: break it down
# PLR0912 Too many branches (25 > 12)
def writeSenseSense( # noqa: PLR0912
self,
hf: "T_htmlfile",
sense: Element,
) -> int:
# this <sense> element can be 1st-level (directly under <entry>)
# or 2nd-level
transCits = []
defList = []
gramList = []
noteList = []
refList = []
usgList = []
xrList = []
exampleCits = []
for child in sense.iterchildren():
if child.tag == f"{tei}cit":
if child.attrib.get("type", "trans") == "trans":
transCits.append(child)
elif child.attrib.get("type") == "example":
exampleCits.append(child)
else:
log.warning(f"unknown cit type: {self.tostring(child)}")
continue
if child.tag == f"{tei}def":
defList.append(child)
continue
if child.tag == f"{tei}note":
_type = child.attrib.get("type")
if not _type:
noteList.append(child)
elif _type in {"pos", "gram"}:
gramList.append(child)
elif _type in {
"sense",
"stagr",
"stagk",
"def",
"usage",
"hint",
"status",
"editor",
"dom",
"infl",
"obj",
"lbl",
}:
noteList.append(child)
else:
log.warning(f"unknown note type {_type}")
noteList.append(child)
continue
if child.tag == f"{tei}ref":
refList.append(child)
continue
if child.tag == f"{tei}usg":
if not child.text:
log.warning(f"empty usg: {self.tostring(child)}")
continue
usgList.append(child)
continue
if child.tag == f"{tei}lang":
self.writeLangTag(hf, child)
continue
if child.tag in {f"{tei}sense", f"{tei}gramGrp"}:
continue
if child.tag == f"{tei}xr":
xrList.append(child)
continue
log.warning(f"unknown tag {child.tag} in <sense>")
self.makeList(
hf,
defList,
self.writeDef,
single_prefix="",
)
if gramList:
color = self._gram_color
attrib = {
"class": self.gramClass,
}
if color:
attrib["color"] = color
with hf.element("div"):
for i, gram in enumerate(gramList):
text = gram.text or ""
if i > 0:
hf.write(self.getCommaSep(text))
with hf.element("font", attrib=attrib):
hf.write(text)
self.makeList(
hf,
noteList,
self.writeNote,
single_prefix="",
)
self.makeList(
hf,
transCits,
self.writeTransCit,
single_prefix="",
)
if refList:
with hf.element("div"):
hf.write("Related: ")
for i, ref in enumerate(refList):
if i > 0:
hf.write(" | ")
self.writeRef(hf, ref)
if xrList:
for xr in xrList:
with hf.element("div"):
self.writeRichText(hf, xr)
if usgList:
with hf.element("div"):
hf.write("Usage: ")
for i, usg in enumerate(usgList):
text = usg.text or ""
if i > 0:
hf.write(self.getCommaSep(text))
hf.write(text)
if exampleCits:
for cit in exampleCits:
with hf.element(
"div",
attrib={
"class": "example",
"style": f"padding: {self._example_padding}px 0px;",
},
):
for quote in cit.findall("quote", self.ns):
self.writeWithDirection(hf, quote, "div")
for cit2 in cit.findall("cit", self.ns):
for quote in cit2.findall("quote", self.ns):
quote.attrib.update(cit2.attrib)
self.writeWithDirection(hf, quote, "div")
return len(transCits) + len(exampleCits)
def getCommaSep(self, sample: str) -> str:
if sample and self._auto_comma:
ws = getWritingSystemFromText(sample)
if ws:
return ws.comma
return self._comma
def writeGramGroups(
self,
hf: "T_htmlfile",
gramGrpList: list[Element],
) -> None:
from lxml import etree as ET
color = self._gram_color
attrib = {
"class": self.gramClass,
}
if color:
attrib["color"] = color
for gramGrp in gramGrpList:
parts = []
for child in gramGrp.iterchildren():
part = self.normalizeGramGrpChild(child)
if part:
parts.append(part)
if not parts:
continue
sep = self.getCommaSep(parts[0])
text = sep.join(parts)
with hf.element("font", attrib=attrib):
hf.write(text)
hf.write(ET.Element("br"))
def writeSenseGrams(
self,
hf: "T_htmlfile",
sense: Element,
) -> None:
self.writeGramGroups(hf, sense.findall("gramGrp", self.ns))
def writeSense(
self,
hf: "T_htmlfile",
sense: Element,
) -> None:
# this <sense> element is 1st-level (directly under <entry>)
self.writeSenseGrams(hf, sense)
self.makeList(
hf,
sense.findall("sense", self.ns),
self.writeSenseSense,
single_prefix="",
)
self.writeSenseSense(hf, sense)
def getDirection(self, elem: Element) -> str:
lang = elem.get(self.xmlLang)
if lang is None:
return ""
langObj = langDict[lang]
if langObj is None:
log.warning(f"unknown language {lang}")
return ""
if langObj.rtl:
return "rtl"
return ""
def writeSenseList(
self,
hf: "T_htmlfile",
senseList: list[Element],
) -> None:
# these <sense> elements are 1st-level (directly under <entry>)
if not senseList:
return
if self._auto_rtl and self.getDirection(senseList[0]) == "rtl":
with hf.element("div", dir="rtl"):
self.makeList(
hf,
senseList,
self.writeSense,
ordered=(len(senseList) > 3),
)
return
self.makeList(
hf,
senseList,
self.writeSense,
# list_type="A",
)
def normalizeGramGrpChild(self, elem: Element) -> str: # noqa: PLR0912
# child can be "pos" or "gen"
tag = elem.tag
text = elem.text
if not text:
return ""
text = text.strip()
if tag == f"{tei}pos":
return self.posMapping.get(text.lower(), text)
if tag == f"{tei}gen":
return self.genderMapping.get(text.lower(), text)
if tag in {f"{tei}num", f"{tei}number"}:
return self.numberMapping.get(text.lower(), text)
if tag == f"{tei}subc":
return self.subcMapping.get(text.lower(), text)
if tag == f"{tei}gram":
_type = elem.get("type")
if _type:
if _type == "pos":
return self.posMapping.get(text.lower(), text)
if _type == "gen":
return self.genderMapping.get(text.lower(), text)
if _type in {"num", "number"}:
return self.numberMapping.get(text.lower(), text)
if _type == "subc":
return self.subcMapping.get(text.lower(), text)
log.warning(f"unrecognize type={_type!r}: {self.tostring(elem)}")
return text
log.warning(f"<gram> with no type: {self.tostring(elem)}")
return text
if tag == f"{tei}note":
return text
if tag == f"{tei}colloc":
return ""
log.warning(
f"unrecognize GramGrp child tag: {elem.tag!r}: {self.tostring(elem)}",
)
return ""
def getEntryByElem( # noqa: PLR0912
self,
entry: Element,
) -> EntryType:
from lxml import etree as ET
glos = self._glos
keywords = []
f = BytesIO()
pron_color = self._pron_color
if self._discover:
for elem in entry.iter():
if elem.tag not in self.supportedTags:
self._discoveredTags[elem.tag] = elem
def br() -> Element:
return ET.Element("br")
inflectedKeywords = []
for form in entry.findall("form", self.ns):
inflected = form.get("type") == "infl"
for orth in form.findall("orth", self.ns):
if not orth.text:
continue
if inflected:
inflectedKeywords.append(orth.text)
else:
keywords.append(orth.text)
keywords += inflectedKeywords
pronList = [
pron.text.strip("/")
for pron in entry.findall("form/pron", self.ns)
if pron.text
]
senseList = entry.findall("sense", self.ns)
with ET.htmlfile(f, encoding="utf-8") as hf:
with hf.element("div"):
if self._word_title:
for keyword in keywords:
with hf.element(glos.titleTag(keyword)):
hf.write(keyword)
hf.write(br())
# TODO: "form/usg"
# <usg type="geo">Brit</usg>
# <usg type="geo">US</usg>
# <usg type="hint">...</usg>
if pronList:
for i, pron in enumerate(pronList):
if i > 0:
hf.write(self.getCommaSep(pron))
hf.write("/")
with hf.element("font", color=pron_color):
hf.write(f"{pron}")
hf.write("/")
hf.write(br())
hf.write("\n")
_hf = cast("T_htmlfile", hf)
self.writeGramGroups(_hf, entry.findall("gramGrp", self.ns))
self.writeSenseList(_hf, senseList)
defi = f.getvalue().decode("utf-8")
# defi = defi.replace("\xa0", " ") # do we need to do this?
_file = self._file
return self._glos.newEntry(
keywords,
defi,
defiFormat="h",
byteProgress=(_file.tell(), self._fileSize),
)
def setWordCount(self, header: Element) -> None:
extent_elem = header.find(".//extent", self.ns)
if extent_elem is None:
log.warning(
"did not find 'extent' tag in metedata, progress bar will not word",
)
return
extent = extent_elem.text or ""
if not extent.endswith(" headwords"):
log.warning(f"unexpected {extent=}")
return
try:
self._wordCount = int(extent.split(" ")[0].replace(",", ""))
except Exception:
log.exception(f"unexpected {extent=}")
@staticmethod
def tostring(elem: Element) -> str:
from lxml import etree as ET
return (
ET.tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
def stripParag(self, elem: Element) -> str:
text = self.tostring(elem)
text = self._p_pattern.sub("\\2", text)
return text # noqa: RET504
def stripParagList(
self,
elems: list[Element],
) -> str:
lines = []
for elem in elems:
for line in self.stripParag(elem).split("\n"):
line = line.strip() # noqa: PLW2901
if not line:
continue
lines.append(line)
return "\n".join(lines)
def setGlosInfo(self, key: str, value: str) -> None:
self._glos.setInfo(key, unescape_unicode(value))
def setCopyright(self, header: Element) -> None:
elems = header.findall(".//availability//p", self.ns)
if not elems:
log.warning("did not find copyright")
return
_copyright = self.stripParagList(elems)
_copyright = self.replaceRefLink(_copyright)
self.setGlosInfo("copyright", _copyright)
log.debug(f"Copyright: {_copyright!r}")
def setPublisher(self, header: Element) -> None:
elem = header.find(".//publisher", self.ns)
if elem is None or not elem.text:
log.warning("did not find publisher")
return
self.setGlosInfo("publisher", elem.text)
def setCreationTime(self, header: Element) -> None:
elem = header.find(".//publicationStmt/date", self.ns)
if elem is None or not elem.text:
return
self.setGlosInfo("creationTime", elem.text)
def replaceRefLink(self, text: str) -> str:
return self._ref_pattern.sub('<a href="\\1">\\2</a>', text)
def setDescription(self, header: Element) -> None:
elems = []
for tag in ("sourceDesc", "projectDesc"):
elems += header.findall(f".//{tag}//p", self.ns)
desc = self.stripParagList(elems)
if not desc:
return
website_list = []
for match in self._website_pattern.findall(desc):
if not match[1]:
continue
website_list.append(match[1])
if website_list:
website = " | ".join(website_list)
self.setGlosInfo("website", website)
desc = self._website_pattern.sub("", desc).strip()
log.debug(f"Website: {website}")
desc = self.replaceRefLink(desc)
self.setGlosInfo("description", desc)
log.debug(
"------------ Description: ------------\n"
f"{desc}\n"
"--------------------------------------",
)
def setMetadata(self, header: Element) -> None:
self.setWordCount(header)
title = header.find(".//title", self.ns)
if title is not None and title.text:
self.setGlosInfo("name", title.text)
edition = header.find(".//edition", self.ns)
if edition is not None and edition.text:
self.setGlosInfo("edition", edition.text)
self.setCopyright(header)
self.setPublisher(header)
self.setCreationTime(header)
self.setDescription(header)
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._dirname = ""
self._file: IOBase = nullBinaryIO
self._fileSize = 0
self._wordCount = 0
self._discoveredTags: "dict[str, Element]" = {}
self._p_pattern = re.compile(
"<p( [^<>]*?)?>(.*?)</p>",
re.DOTALL,
)
self._ref_pattern = re.compile(
'<ref target="(.*?)">(.*?)</ref>',
)
self._website_pattern = re.compile(
'Home: <(ref|ptr) target="(.*)">(.*)</\\1>',
)
def __len__(self) -> int:
return self._wordCount
def close(self) -> None:
self._file.close()
self._file = nullBinaryIO
self._filename = ""
self._fileSize = 0
def open(
self,
filename: str,
) -> None:
try:
from lxml import etree as ET
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
self._filename = filename
self._dirname = dirname(filename)
cfile = compressionOpen(filename, mode="rb")
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("FreeDict Reader: file is not seekable")
self._glos.setDefaultDefiFormat("h")
if self._word_title:
self._glos.setInfo("definition_has_headwords", "True")
context = ET.iterparse( # type: ignore # noqa: PGH003
cfile,
events=("end",),
tag=f"{tei}teiHeader",
)
for _, elem in context:
self.setMetadata(elem) # type: ignore
break
cfile.close()
def loadInclude(self, elem: Element) -> Reader | None:
href = elem.attrib.get("href")
if not href:
log.error(f"empty href in {elem}")
return None
filename = join(self._dirname, href)
if not isfile(filename):
log.error(f"no such file {filename!r} from {elem}")
return None
reader = Reader(self._glos)
for optName in optionsProp:
attr = "_" + optName
if hasattr(self, attr):
setattr(reader, attr, getattr(self, attr))
reader.open(filename)
return reader
def __iter__(self) -> Iterator[EntryType]:
from lxml import etree as ET
if self._auto_rtl is None:
glos = self._glos
if (glos.sourceLang and glos.sourceLang.rtl) or (
glos.targetLang and glos.targetLang.rtl
):
log.info("setting auto_rtl=True")
self._auto_rtl = True
self._file = compressionOpen(self._filename, mode="rb")
context = ET.iterparse( # type: ignore # noqa: PGH003
self._file,
events=("end",),
tag=(ENTRY, INCLUDE),
)
for _, _elem in context:
elem = cast("Element", _elem)
if elem.tag == INCLUDE:
reader = self.loadInclude(elem)
if reader is not None:
yield from reader
reader.close()
continue
yield self.getEntryByElem(elem)
# clean up preceding siblings to save memory
# this can reduce memory usage from 1 GB to ~25 MB
parent = elem.getparent()
if parent is None:
continue
while elem.getprevious() is not None:
del parent[0]
if self._discoveredTags:
log.info("Found unsupported tags")
for elem in self._discoveredTags.values():
log.info(f"{self.tostring(elem)}\n")
| 23,490 | Python | .py | 898 | 22.423163 | 73 | 0.654474 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,836 | tabfile.py | ilius_pyglossary/pyglossary/plugins/tabfile.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Generator
from typing import TYPE_CHECKING
from pyglossary.compression import stdCompressions
from pyglossary.core import log
from pyglossary.option import (
BoolOption,
EncodingOption,
FileSizeOption,
Option,
)
from pyglossary.text_reader import TextGlossaryReader
from pyglossary.text_utils import (
splitByBarUnescapeNTB,
unescapeNTB,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = ["Reader"]
enable = True
lname = "tabfile"
format = "Tabfile"
description = "Tabfile (.txt, .dic)"
extensions = (".txt", ".tab", ".tsv")
extensionCreate = ".txt"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/Tab-separated_values"
website = None
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"enable_info": BoolOption(
comment="Enable glossary info / metedata",
),
"resources": BoolOption(
comment="Enable resources / data files",
),
"file_size_approx": FileSizeOption(
comment="Split up by given approximate file size\nexamples: 100m, 1g",
),
"word_title": BoolOption(
comment="Add headwords title to beginning of definition",
),
}
class Reader(TextGlossaryReader):
@classmethod
def isInfoWord(cls, word: str) -> bool:
return word.startswith("#")
@classmethod
def fixInfoWord(cls, word: str) -> str:
return word.lstrip("#")
def nextBlock(self) -> tuple[str | list[str], str, None] | None:
if not self._file:
raise StopIteration
line = self.readline()
if not line:
raise StopIteration
line = line.rstrip("\n")
if not line:
return None
###
word: "str | list[str]"
word, tab, defi = line.partition("\t")
if not tab:
log.warning(
f"Warning: line starting with {line[:10]!r} has no tab!",
)
return None
###
if self._glos.alts:
word = splitByBarUnescapeNTB(word)
if len(word) == 1:
word = word[0]
else:
word = unescapeNTB(word, bar=False)
###
defi = unescapeNTB(defi)
###
return word, defi, None
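# Hedged example: with alts enabled, a raw line like
#   colour|color<TAB>any of several hues\nsee: hue
# parses to word == ["colour", "color"] and a defi whose "\n" escape is turned back
# into a real newline by unescapeNTB; the bar separates alternate headwords.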
class Writer:
_encoding: str = "utf-8"
_enable_info: bool = True
_resources: bool = True
_file_size_approx: int = 0
_word_title: bool = False
compressions = stdCompressions
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
def open(
self,
filename: str,
) -> None:
self._filename = filename
def finish(self) -> None:
pass
def write(self) -> Generator[None, EntryType, None]:
from pyglossary.text_utils import escapeNTB, joinByBar
from pyglossary.text_writer import TextGlossaryWriter
writer = TextGlossaryWriter(
self._glos,
entryFmt="{word}\t{defi}\n",
writeInfo=self._enable_info,
outInfoKeysAliasDict=None,
)
writer.setAttrs(
encoding=self._encoding,
wordListEncodeFunc=joinByBar,
wordEscapeFunc=escapeNTB,
defiEscapeFunc=escapeNTB,
ext=".txt",
resources=self._resources,
word_title=self._word_title,
file_size_approx=self._file_size_approx,
)
writer.open(self._filename)
yield from writer.write()
writer.finish()
| 3,100 | Python | .py | 119 | 23.411765 | 72 | 0.723928 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,837 | json_plugin.py | ilius_pyglossary/pyglossary/plugins/json_plugin.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Generator
from typing import TYPE_CHECKING
from pyglossary.compression import (
# compressionOpen,
stdCompressions,
)
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import (
EntryType,
GlossaryType,
)
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "json"
format = "Json"
description = "JSON (.json)"
extensions = (".json",)
extensionCreate = ".json"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/JSON"
website = (
"https://www.json.org/json-en.html",
"www.json.org",
)
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"enable_info": BoolOption(comment="Enable glossary info / metedata"),
"resources": BoolOption(comment="Enable resources / data files"),
"word_title": BoolOption(
comment="add headwords title to beginning of definition",
),
}
class Writer:
_encoding: str = "utf-8"
_enable_info: bool = True
_resources: bool = True
_word_title: bool = False
compressions = stdCompressions
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
glos.preventDuplicateWords()
def open(self, filename: str) -> None:
self._filename = filename
def finish(self) -> None:
self._filename = ""
def write(self) -> Generator[None, EntryType, None]:
from json import dumps
from pyglossary.text_writer import writeTxt
glos = self._glos
encoding = self._encoding
enable_info = self._enable_info
resources = self._resources
ensure_ascii = encoding == "ascii"
def escape(st: str) -> str:
return dumps(st, ensure_ascii=ensure_ascii)
yield from writeTxt(
glos,
entryFmt="\t{word}: {defi},\n",
filename=self._filename,
encoding=encoding,
writeInfo=enable_info,
wordEscapeFunc=escape,
defiEscapeFunc=escape,
ext=".json",
head="{\n",
tail='\t"": ""\n}',
resources=resources,
word_title=self._word_title,
)
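# Rough sketch of the emitted JSON (headwords and definitions invented for illustration):
# {
# "headword": "<b>definition</b>",
# "another": "plain text",
# "": ""
# }
# The trailing '"": ""' pair comes from `tail` and keeps the object valid despite the
# per-entry trailing commas.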
| 2,176 | Python | .py | 91 | 21.505495 | 70 | 0.713111 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,838 | wiktextract.py | ilius_pyglossary/pyglossary/plugins/wiktextract.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import collections
from io import BytesIO, IOBase
from json import loads as json_loads
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
from collections.abc import Callable, Iterator
from typing import Any
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element, T_htmlfile
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import exc_note, log, pip
from pyglossary.io_utils import nullBinaryIO
from pyglossary.option import (
BoolOption,
ListOption,
Option,
StrOption,
)
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "wiktextract"
format = "Wiktextract"
description = "Wiktextract (.jsonl)"
extensions = (".jsonl",)
extensionCreate = ".jsonl"
singleFile = True
kind = "text"
wiki = ""
website = (
"https://github.com/tatuylonen/wiktextract",
"@tatuylonen/wiktextract",
)
optionsProp: "dict[str, Option]" = {
"resources": BoolOption(
comment="Enable resources / data files",
),
"word_title": BoolOption(
comment="Add headwords title to beginning of definition",
),
"pron_color": StrOption(
comment="Pronunciation color",
),
"gram_color": StrOption(
comment="Grammar color",
),
"example_padding": StrOption(
comment="Padding for examples (css value)",
),
"audio": BoolOption(
comment="Enable audio",
),
"audio_formats": ListOption(
comment="List of audio formats to use",
),
}
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
_word_title: bool = False
_pron_color: str = "gray"
_gram_color: str = "green"
# 'top right' or 'top right bottom left'
_example_padding: str = "10px 20px"
_audio: bool = True
_audio_formats: list[str] = ["ogg", "mp3"]
topicStyle = (
"color:white;"
"background:green;"
"padding-left:3px;"
"padding-right:3px;"
"border-radius:0.5ex;"
# 0.5ex ~= 0.3em, but "ex" is recommended
)
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: IOBase = nullBinaryIO
self._fileSize = 0
self._wordCount = 0
self._warnings: "collections.Counter[str]" = collections.Counter()
def open(
self,
filename: str,
) -> None:
try:
# import eagerly so a missing lxml fails here with a helpful note
# (lxml is used later by makeEntry)
from lxml import etree  # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
self._filename = filename
cfile = compressionOpen(filename, mode="rt", encoding="utf-8")
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
self.warning("Wiktextract Reader: file is not seekable")
self._glos.setDefaultDefiFormat("h")
if self._word_title:
self._glos.setInfo("definition_has_headwords", "True")
self._file = cfile
self._warnings = collections.Counter()
def close(self) -> None:
self._file.close()
self._file = nullBinaryIO
self._filename = ""
self._fileSize = 0
def __len__(self) -> int:
return 0
def __iter__(self) -> Iterator[EntryType]:
while line := self._file.readline():
line = line.strip()
if not line:
continue
yield self.makeEntry(json_loads(line))
for _msg, count in self._warnings.most_common():
msg = _msg
if count > 1:
msg = f"[{count} times] {msg}"
log.warning(msg)
def warning(self, msg: str) -> None:
self._warnings[msg] += 1
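# Hedged example of one wiktextract JSONL record consumed below (values invented, but
# the keys match what makeEntry and its helpers read):
# {"word": "run", "pos": "verb",
# "forms": [{"form": "ran", "source": "Inflection"}],
# "sounds": [{"ipa": "/ɹʌn/"}],
# "senses": [{"glosses": ["move quickly"], "examples": [{"text": "he runs"}]}]}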
def makeEntry(self, data: "dict[str, Any]") -> EntryType:
from lxml import etree as ET
glos = self._glos
f = BytesIO()
def br() -> Element:
return ET.Element("br")
keywords = []
inflectedKeywords = []
word = data.get("word")
if word:
keywords.append(word)
for formDict in data.get("forms", []):
form: str = formDict.get("form", "")
if not form:
continue
if len(form) > 80:
self.warning(f"'form' too long: {form}")
continue
source: str = formDict.get("source", "")
# tags = formDict.get("tags", [])
if source == "Inflection":
inflectedKeywords.append(form)
else:
keywords.append(form)
keywords += inflectedKeywords
with ET.htmlfile(f, encoding="utf-8") as hf:
with hf.element("div"):
if self._word_title:
for keyword in keywords:
with hf.element(glos.titleTag(keyword)):
hf.write(keyword)
hf.write(br())
_hf = cast("T_htmlfile", hf)
self.writeSoundList(_hf, data.get("sounds"))
pos: "str | None" = data.get("pos")
if pos:
with hf.element("div", attrib={"class": "pos"}):
with hf.element("font", color=self._gram_color):
hf.write(pos)
self.writeSenseList(_hf, data.get("senses")) # type: ignore
self.writeSynonyms(_hf, data.get("synonyms")) # type: ignore
self.writeAntonyms(_hf, data.get("antonyms")) # type: ignore
# TODO: data.get("translations")
# list[dict[str, str]]
# dict keys: code, "lang", "sense", "word"
etymology: str = data.get("etymology_text", "")
if etymology:
with hf.element("div"):
hf.write(f"Etymology: {etymology}")
defi = f.getvalue().decode("utf-8")
# defi = defi.replace("\xa0", " ") # do we need to do this?
_file = self._file
return self._glos.newEntry(
keywords,
defi,
defiFormat="h",
byteProgress=(_file.tell(), self._fileSize),
)
def writeSoundPron(
self,
hf: "T_htmlfile",
sound: "dict[str, Any]",
) -> None:
# "homophone" key found in Dutch and Arabic dictionaries
# (similar-sounding words for Arabic)
for key in ("ipa", "other", "rhymes", "homophone"):
value = sound.get(key)
if not value:
continue
with hf.element("font", color=self._pron_color):
hf.write(f"{value}")
hf.write(f" ({key})")
def writeSoundAudio(
self,
hf: "T_htmlfile",
sound: "dict[str, Any]",
) -> None:
# TODO: add a read-option for audio
# keys for audio:
# "audio" (file name), "text" (link text), "ogg_url", "mp3_url"
# possible "tags" (list[str])
text = sound.get("text")
if text:
hf.write(f"{text}: ")
with hf.element("audio", attrib={"controls": ""}):
for _format in self._audio_formats:
url = sound.get(f"{_format}_url")
if not url:
continue
with hf.element(
"source",
attrib={
"src": url,
"type": f"audio/{_format}",
},
):
pass
def writeSoundList(
self,
hf: "T_htmlfile",
soundList: "list[dict[str, Any]] | None",
) -> None:
if not soundList:
return
pronList: "list[dict[str, Any]]" = []
audioList: "list[dict[str, Any]]" = []
for sound in soundList:
if "audio" in sound:
if self._audio:
audioList.append(sound)
continue
pronList.append(sound)
# can it contain both audio and pronunciation?
if pronList:
with hf.element("div", attrib={"class": "pronunciations"}):
for i, sound in enumerate(pronList):
if i > 0:
hf.write(", ")
self.writeSoundPron(hf, sound)
for sound in audioList:
with hf.element("div", attrib={"class": "audio"}):
self.writeSoundAudio(hf, sound)
def writeSenseList(
self,
hf: "T_htmlfile",
senseList: "list[dict[str, Any]]",
) -> None:
if not senseList:
return
self.makeList(
hf,
senseList,
self.writeSense,
)
def writeSenseGloss( # noqa: PLR6301
self,
hf: "T_htmlfile",
text: "str | None",
) -> None:
hf.write(text or "")
def writeSenseCategory( # noqa: PLR6301
self,
hf: "T_htmlfile",
category: "dict[str, Any]",
) -> None:
# keys: name: str, kind: str, parents: list, source: str
# values for "source" (that I found): "w", "w+disamb"
name = category.get("name")
if not name:
self.warning(f"{category = }")
return
desc = name
source = category.get("source")
if source:
desc = f"{desc} (source: {source})"
hf.write(desc)
def writeSenseCategories(
self,
hf: "T_htmlfile",
categories: "list[dict[str, Any]] | None",
) -> None:
if not categories:
return
# long names, mostly about grammar?
with hf.element("div", attrib={"class": "categories"}):
hf.write("Categories: ")
self.makeList(hf, categories, self.writeSenseCategory)
def writeSenseExample( # noqa: PLR6301, PLR0912
self,
hf: "T_htmlfile",
example: "dict[str, str]",
) -> None:
# example keys: text, "english", "ref", "type"
textList: list[tuple[str | None, str]] = []
_text = example.pop("example", "")
if _text:
textList.append((None, _text))
example.pop("ref", "")
example.pop("type", "")
for key, value in example.items():
if not value:
continue
prefix = key
if prefix in ("text",): # noqa: PLR6201, FURB171
prefix = None
if isinstance(value, str):
textList.append((prefix, value))
elif isinstance(value, list):
for item in value:
if isinstance(item, str):
textList.append((prefix, item))
elif isinstance(item, list):
textList += [(prefix, item2) for item2 in item]
else:
log.error(f"writeSenseExample: invalid type for {value=}")
if not textList:
return
def writePair(prefix: str | None, text: str) -> None:
if prefix:
with hf.element("b"):
hf.write(prefix)
hf.write(": ")
hf.write(text)
if len(textList) == 1:
prefix, text = textList[0]
writePair(prefix, text)
return
with hf.element("ul"):
for prefix, text in textList:
with hf.element("li"):
writePair(prefix, text)
def writeSenseExamples(
self,
hf: "T_htmlfile",
examples: "list[dict[str, str]] | None",
) -> None:
from lxml import etree as ET
if not examples:
return
hf.write(ET.Element("br"))
with hf.element("div", attrib={"class": "examples"}):
hf.write("Examples:")
hf.write(ET.Element("br"))
for example in examples:
with hf.element(
"div",
attrib={
"class": "example",
"style": f"padding: {self._example_padding};",
},
):
self.writeSenseExample(hf, example)
def writeSenseFormOf( # noqa: PLR6301
self,
hf: "T_htmlfile",
form_of: "dict[str, str]",
) -> None:
from lxml import etree as ET
# {"word": ..., "extra": ...}
word = form_of.get("word")
if not word:
return
hf.write(word)
extra = form_of.get("extra")
if extra:
hf.write(ET.Element("br"))
hf.write(extra)
def writeSenseFormOfList(
self,
hf: "T_htmlfile",
form_of_list: "list[dict[str, str]] | None",
) -> None:
if not form_of_list:
return
with hf.element("div", attrib={"class": "form_of"}):
hf.write("Form of: ")
self.makeList(hf, form_of_list, self.writeSenseFormOf)
def writeTags(
self,
hf: "T_htmlfile",
tags: "list[str] | None",
toRemove: "list[str] | None",
) -> None:
if not tags:
return
if toRemove:
for tag in toRemove:
if tag in tags:
tags.remove(tag)
if not tags:
return
with hf.element("div", attrib={"class": "tags"}):
for i, tag in enumerate(tags):
if i > 0:
hf.write(", ")
with hf.element("font", color=self._gram_color):
hf.write(tag)
def writeTopics(
self,
hf: "T_htmlfile",
topics: "list[str] | None",
) -> None:
if not topics:
return
with hf.element("div", attrib={"class": "tags"}):
for i, topic in enumerate(topics):
if i > 0:
hf.write(" ")
with hf.element("span", style=self.topicStyle):
hf.write(topic)
def addWordLink( # noqa: PLR6301
self,
hf: "T_htmlfile",
word: str,
wordClass: str = "",
) -> None:
i = word.find(" [")
if i >= 0:
word = word[:i]
if not word:
return
attrib = {"href": f"bword://{word}"}
if wordClass:
attrib["class"] = wordClass
with hf.element(
"a",
attrib=attrib,
):
hf.write(word)
def writeSynonyms(
self,
hf: "T_htmlfile",
synonyms: "list[dict[str, Any]] | None",
) -> None:
if not synonyms:
return
# "word": "str",
# "sense": "str",
# "_dis1": "str",
# "tags": list[str]
# "extra": "str",
# "english": "str"
with hf.element("div"):
hf.write("Synonyms: ")
for i, item in enumerate(synonyms):
if i > 0:
hf.write(", ")
word = item.get("word")
if not word:
continue
self.addWordLink(hf, word)
def writeAntonyms(
self,
hf: "T_htmlfile",
antonyms: "list[dict[str, str]] | None",
) -> None:
if not antonyms:
return
# dict keys: word
with hf.element("div"):
hf.write("Antonyms: ")
for i, item in enumerate(antonyms):
if i > 0:
hf.write(", ")
word = item.get("word")
if not word:
continue
self.addWordLink(hf, word, wordClass="antonym")
def writeRelated(
self,
hf: "T_htmlfile",
relatedList: "list[dict[str, str]] | None",
) -> None:
if not relatedList:
return
# dict keys: sense, "word", "english"
with hf.element("div"):
hf.write("Related: ")
for i, item in enumerate(relatedList):
if i > 0:
hf.write(", ")
word = item.get("word")
if not word:
continue
self.addWordLink(hf, word)
def writeSenseLinks(
self,
hf: "T_htmlfile",
linkList: "list[list[str]] | None",
) -> None:
if not linkList:
return
with hf.element("div"):
hf.write("Links: ")
for i, link in enumerate(linkList):
if len(link) != 2:
self.warning(f"unexpected {link =}")
continue
text, ref = link
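# hedged examples: ("run", "run#English") links to "run", while ("run", "#English")
# has no page before the fragment, so the visible text is used as the target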
sq = ref.find("#")
if sq == 0:
ref = text
elif sq > 0:
ref = ref[:sq]
if i > 0:
hf.write(", ")
self.addWordLink(hf, ref)
def writeSense(
self,
hf: "T_htmlfile",
sense: "dict[str, Any]",
) -> None:
from lxml import etree as ET
# tags seem to be mostly about grammar, so format them like grammar
self.writeTags(
hf,
sense.get("tags"),
toRemove=["form-of"],
)
# for key in ("english",):
# text: "str | None" = sense.get("english")
# if not text:
# continue
# keyCap = key.capitalize()
# with hf.element("div"):
# with hf.element("b"):
# hf.write(keyCap)
# hf.write(f": {text}")
# sense["glosses"] and sense["english"] seems to be unreliable
# for example:
# "raw_glosses": ["(short) story, fable, play"],
# "english": "short",
# "glosses": ["story, fable, play"],
glosses: "list[str] | None" = sense.get("raw_glosses")
if not glosses:
glosses = sense.get("glosses")
if glosses:
self.makeList(hf, glosses, self.writeSenseGloss)
self.writeSenseCategories(hf, sense.get("categories"))
self.writeTopics(hf, sense.get("topics"))
self.writeSenseFormOfList(hf, sense.get("form_of"))
self.writeSynonyms(hf, sense.get("synonyms"))
self.writeAntonyms(hf, sense.get("antonyms"))
self.writeRelated(hf, sense.get("related"))
self.writeSenseLinks(hf, sense.get("links"))
self.writeSenseExamples(hf, sense.get("examples"))
# alt_of[i]["word"] seem to point to a word that is
# mentioned in sense["raw_glosses"]
# so we could try to find that word and turn it into a link
# sense.get("alt_of"): list[dict[str, str]] | None
# sense.get("wikipedia", []): list[str]
# sense.get("wikidata", []): list[str]
# sense.get("id", ""): str # not useful
# sense.get("senseid", []): list[str] # not useful
hf.write(ET.Element("br"))
@staticmethod
def makeList( # noqa: PLR0913
hf: "T_htmlfile",
input_objects: list[Any],
processor: Callable,
single_prefix: str = "",
skip_single: bool = True,
ordered: bool = True,
list_type: str = "",
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not input_objects:
return
if skip_single and len(input_objects) == 1:
if single_prefix:
hf.write(single_prefix)
processor(hf, input_objects[0])
return
attrib: "dict[str, str]" = {}
if list_type:
attrib["type"] = list_type
with hf.element("ol" if ordered else "ul", attrib=attrib):
for el in input_objects:
with hf.element("li"):
processor(hf, el)
| 15,768 | Python | .py | 602 | 22.61794 | 72 | 0.644986 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,839 | ebook_kobo.py | ilius_pyglossary/pyglossary/plugins/ebook_kobo.py |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright © 2012-2016 Alberto Pettarin (alberto@albertopettarin.it)
# Copyright © 2022 Saeed Rasooli <saeed.gnu@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import re
import unicodedata
from collections.abc import Generator
from gzip import compress, decompress
from operator import itemgetter
from pathlib import Path
from pickle import dumps, loads
from typing import TYPE_CHECKING
from pyglossary import core
from pyglossary.core import exc_note, log, pip
from pyglossary.flags import NEVER
from pyglossary.os_utils import indir
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "kobo"
format = "Kobo"
description = "Kobo E-Reader Dictionary"
extensions = (".kobo",)
extensionCreate = ".kobo.zip"
singleFile = False
kind = "package"
sortOnWrite = NEVER
wiki = "https://en.wikipedia.org/wiki/Kobo_eReader"
website = (
"https://www.kobo.com",
"www.kobo.com",
)
# https://help.kobo.com/hc/en-us/articles/360017640093-Add-new-dictionaries-to-your-Kobo-eReader
optionsProp: "dict[str, Option]" = {}
# Penelope option: marisa_index_size=1000000
def is_cyrillic_char(c: str) -> bool:
# U+0400 - U+04FF: Cyrillic
# U+0500 - U+052F: Cyrillic Supplement
if "\u0400" <= c <= "\u052f":
return True
# U+2DE0 - U+2DFF: Cyrillic Extended-A
if "\u2de0" <= c <= "\u2dff":
return True
# U+A640 - U+A69F: Cyrillic Extended-B
if "\ua640" <= c <= "\ua69f":
return True
# U+1C80 - U+1C8F: Cyrillic Extended-C
if "\u1c80" <= c <= "\u1c8f":
return True
# U+FE2E, U+FE2F: Combining Half Marks
# U+1D2B, U+1D78: Phonetic Extensions
return c in {"\ufe2e", "\ufe2f", "\u1d2b", "\u1d78"}
def fixFilename(fname: str) -> str:
return Path(fname.replace("/", "2F").replace("\\", "5C")).name
class Writer:
WORDS_FILE_NAME = "words"
depends = {
"marisa_trie": "marisa-trie",
}
@staticmethod
def stripFullHtmlError(entry: EntryType, error: str) -> None:
log.error(f"error in stripFullHtml: {error}, words={entry.l_word!r}")
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._words: list[str] = []
self._img_pattern = re.compile(
'<img src="([^<>"]*?)"( [^<>]*?)?>',
re.DOTALL,
)
# img tag has no closing
glos.stripFullHtml(errorHandler=self.stripFullHtmlError)
def get_prefix(self, word: str) -> str: # noqa: PLR6301
if not word:
return "11"
wo = word[:2].strip().lower()
if not wo:
return "11"
if wo[0] == "\x00":
return "11"
if len(wo) > 1 and wo[1] == "\x00":
wo = wo[:1]
if is_cyrillic_char(wo[0]):
return wo
# if either of the first 2 chars are not unicode letters, return "11"
for c in wo:
if not unicodedata.category(c).startswith("L"):
return "11"
return wo.ljust(2, "a")
def fix_defi(self, defi: str) -> str:
# @pgaskin on #219: Kobo supports images in dictionaries,
# but these have a lot of gotchas
# (see https://pgaskin.net/dictutil/dicthtml/format.html).
# Basically, The best way to do it is to encode the images as a
# base64 data URL after shrinking it and making it grayscale
# (if it's JPG, this is as simple as only keeping the Y channel)
# for now we just skip data entries and remove '<img' tags
return self._img_pattern.sub("[Image: \\1]", defi)
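# e.g. '<img src="img/cat.png" alt="cat">' becomes '[Image: img/cat.png]' here;
# actual base64 embedding (as dictutil suggests) is not implemented.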
def write_groups(self) -> Generator[None, EntryType, None]:
import gzip
from collections import OrderedDict
dataEntryCount = 0
htmlHeader = '<?xml version="1.0" encoding="utf-8"?><html>\n'
groupCounter = 0
htmlContents = htmlHeader
def writeGroup(lastPrefix: str) -> None:
nonlocal htmlContents
group_fname = fixFilename(lastPrefix)
htmlContents += "</html>"
core.trace(
log,
f"writeGroup: {lastPrefix!r}, "
f"{group_fname!r}, count={groupCounter}",
)
with gzip.open(group_fname + ".html", mode="wb") as gzipFile:
gzipFile.write(htmlContents.encode("utf-8"))
htmlContents = htmlHeader
allWords = []
data = []
while True:
entry = yield
if entry is None:
break
if entry.isData():
dataEntryCount += 1
continue
l_word = entry.l_word
allWords += l_word
wordsByPrefix: "dict[str, list[str]]" = OrderedDict()
for word in l_word:
prefix = self.get_prefix(word)
if prefix in wordsByPrefix:
wordsByPrefix[prefix].append(word)
else:
wordsByPrefix[prefix] = [word]
defi = self.fix_defi(entry.defi)
mainHeadword = l_word[0]
for prefix, p_words in wordsByPrefix.items():
headword, *variants = p_words
if headword != mainHeadword:
headword = f"{mainHeadword}, {headword}"
data.append(
(
prefix,
compress(
dumps(
(
headword,
variants,
defi,
),
),
),
),
)
del entry
log.info("Kobo: sorting entries...")
data.sort(key=itemgetter(0))
log.info("Kobo: writing entries...")
lastPrefix = ""
for prefix, row in data:
headword, variants, defi = loads(decompress(row))
if lastPrefix and prefix != lastPrefix:
writeGroup(lastPrefix)
groupCounter = 0
lastPrefix = prefix
htmlVariants = "".join(
f'<variant name="{v.strip().lower()}"/>' for v in variants
)
body = f"<div><b>{headword}</b><var>{htmlVariants}</var><br/>{defi}</div>"
htmlContents += f'<w><a name="{headword}" />{body}</w>\n'
groupCounter += 1
del data
if groupCounter > 0:
writeGroup(lastPrefix)
if dataEntryCount > 0:
log.warning(
f"ignored {dataEntryCount} files (data entries)"
" and replaced '<img ...' tags in definitions with placeholders",
)
self._words = allWords
def open(self, filename: str) -> None:
try:
import marisa_trie # type: ignore # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install marisa-trie` to install")
raise
self._filename = filename
def write(self) -> Generator[None, EntryType, None]:
with indir(self._filename, create=True):
yield from self.write_groups()
def finish(self) -> None:
import marisa_trie
with indir(self._filename, create=False):
trie = marisa_trie.Trie(self._words)
trie.save(self.WORDS_FILE_NAME)
self._filename = ""
| 7,459 | Python | .py | 228 | 29.486842 | 96 | 0.697843 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,840 | stardict_textual.py | ilius_pyglossary/pyglossary/plugins/stardict_textual.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
from os.path import dirname, isdir, join
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import io
from collections.abc import Generator, Iterator
from lxml import builder
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element
from pyglossary.xdxf.transform import XdxfTransformer
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import exc_note, log, pip
from pyglossary.html_utils import unescape_unicode
from pyglossary.io_utils import nullBinaryIO
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "stardict_textual"
format = "StardictTextual"
description = "StarDict Textual File (.xml)"
extensions = ()
extensionCreate = ".xml"
sortKeyName = "stardict"
singleFile = True
kind = "text"
wiki = ""
website = (
"https://github.com/huzheng001/stardict-3"
"/blob/master/dict/doc/TextualDictionaryFileFormat",
"TextualDictionaryFileFormat",
)
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"xdxf_to_html": BoolOption(
comment="Convert XDXF entries to HTML",
),
}
class Reader:
_encoding: str = "utf-8"
_xdxf_to_html: bool = True
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: "io.IOBase" = nullBinaryIO
self._fileSize = 0
self._xdxfTr: "XdxfTransformer | None" = None
def xdxf_setup(self) -> XdxfTransformer:
from pyglossary.xdxf.transform import XdxfTransformer
self._xdxfTr = tr = XdxfTransformer(encoding="utf-8")
return tr
def xdxf_transform(self, text: str) -> str:
tr = self._xdxfTr
if tr is None:
tr = self.xdxf_setup()
return tr.transformByInnerString(text)
def __len__(self) -> int:
return 0
def close(self) -> None:
self._file.close()
self._file = nullBinaryIO
self._filename = ""
self._fileSize = 0
def open(self, filename: str) -> None:
try:
from lxml import etree as ET
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
self._filename = filename
cfile = compressionOpen(filename, mode="rb")
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
# self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("StarDict Textual File Reader: file is not seekable")
context = ET.iterparse( # type: ignore # noqa: PGH003
cfile,
events=("end",),
tag="info",
)
for _, elem in context:
self.setMetadata(elem) # type: ignore
break
cfile.close()
def setGlosInfo(self, key: str, value: str) -> None:
if value is None:
return
self._glos.setInfo(key, unescape_unicode(value))
def setMetadata(self, header: Element) -> None:
if (elem := header.find("./bookname")) is not None and elem.text:
self.setGlosInfo("name", elem.text)
if (elem := header.find("./author")) is not None and elem.text:
self.setGlosInfo("author", elem.text)
if (elem := header.find("./email")) is not None and elem.text:
self.setGlosInfo("email", elem.text)
if (elem := header.find("./website")) is not None and elem.text:
self.setGlosInfo("website", elem.text)
if (elem := header.find("./description")) is not None and elem.text:
self.setGlosInfo("description", elem.text)
if (elem := header.find("./bookname")) is not None and elem.text:
self.setGlosInfo("name", elem.text)
if (elem := header.find("./bookname")) is not None and elem.text:
self.setGlosInfo("name", elem.text)
if (elem := header.find("./date")) is not None and elem.text:
self.setGlosInfo("creationTime", elem.text)
# if (elem := header.find("./dicttype")) is not None and elem.text:
# self.setGlosInfo("dicttype", elem.text)
def renderDefiList(
self,
defisWithFormat: "list[tuple[str, str]]",
) -> tuple[str, str]:
if not defisWithFormat:
return "", ""
if len(defisWithFormat) == 1:
return defisWithFormat[0]
defiFormatSet: set[str] = set()
defiFormatSet.update(_type for _, _type in defisWithFormat)
if len(defiFormatSet) == 1:
defis = [_defi for _defi, _ in defisWithFormat]
_format = defiFormatSet.pop()
if _format == "h":
return "\n<hr>".join(defis), _format
return "\n".join(defis), _format
# convert plaintext or xdxf to html
defis = []
for _defi, _format in defisWithFormat:
if _format == "m":
_defi = _defi.replace("\n", "<br/>")
_defi = f"<pre>{_defi}</pre>"
elif _format == "x":
_defi = self.xdxf_transform(_defi)
defis.append(_defi)
return "\n<hr>\n".join(defis), "h"
def __iter__(self) -> Iterator[EntryType]:
from lxml import etree as ET
glos = self._glos
fileSize = self._fileSize
self._file = _file = compressionOpen(self._filename, mode="rb")
context = ET.iterparse( # type: ignore # noqa: PGH003
self._file,
events=("end",),
tag="article",
)
for _, _elem in context:
elem = cast("Element", _elem)
words = []
defisWithFormat = []
for child in elem.getchildren():
if not child.text:
continue
if child.tag in {"key", "synonym"}:
words.append(child.text)
elif child.tag == "definition":
_type = child.attrib.get("type", "")
if _type:
new_type = {
"m": "m",
"t": "m",
"y": "m",
"g": "h",
"h": "h",
"x": "x",
}.get(_type, "")
if not new_type:
log.warning(f"unsupported definition type {_type}")
_type = new_type
if not _type:
_type = "m"
_defi = child.text.strip()
if _type == "x" and self._xdxf_to_html:
_defi = self.xdxf_transform(_defi)
_type = "h"
defisWithFormat.append((_defi, _type))
# TODO: child.tag == "definition-r"
else:
log.warning(f"unknown tag {child.tag}")
defi, defiFormat = self.renderDefiList(defisWithFormat)
yield glos.newEntry(
words,
defi,
defiFormat=defiFormat,
byteProgress=(_file.tell(), fileSize),
)
# clean up preceding siblings to save memory
# this can reduce memory usage from >300 MB to ~25 MB
while elem.getprevious() is not None:
parent = elem.getparent()
if parent is None:
break
del parent[0]
class Writer:
_encoding: str = "utf-8"
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._resDir = ""
def open(
self,
filename: str,
) -> None:
self._filename = filename
self._resDir = join(dirname(self._filename), "res")
self._file = compressionOpen(
self._filename,
mode="w",
encoding=self._encoding,
)
def finish(self) -> None:
self._file.close()
def writeInfo(
self,
maker: "builder.ElementMaker",
pretty: bool,
) -> None:
from lxml import etree as ET
glos = self._glos
desc = glos.getInfo("description")
_copyright = glos.getInfo("copyright")
if _copyright:
desc = f"{_copyright}\n{desc}"
publisher = glos.getInfo("publisher")
if publisher:
desc = f"Publisher: {publisher}\n{desc}"
info = maker.info(
maker.version("3.0.0"),
maker.bookname(glos.getInfo("name")),
maker.author(glos.getInfo("author")),
maker.email(glos.getInfo("email")),
maker.website(glos.getInfo("website")),
maker.description(desc),
maker.date(glos.getInfo("creationTime")),
maker.dicttype(""),
)
_file = self._file
_file.write(
cast(
bytes,
ET.tostring(
info,
encoding=self._encoding,
pretty_print=pretty,
),
).decode(self._encoding)
+ "\n",
)
def writeDataEntry(
self,
maker: "builder.ElementMaker", # noqa: ARG002
entry: EntryType,
) -> None:
entry.save(self._resDir)
# TODO: create article tag with "definition-r" in it?
# or just save the file to res/ directory? or both?
# article = maker.article(
# maker.key(entry.s_word),
# maker.definition_r(
# ET.CDATA(entry.defi),
# **{"type": ext})
# )
# )
def write(self) -> Generator[None, EntryType, None]:
from lxml import builder
from lxml import etree as ET
_file = self._file
encoding = self._encoding
maker = builder.ElementMaker()
_file.write(
"""<?xml version="1.0" encoding="UTF-8" ?>
<stardict xmlns:xi="http://www.w3.org/2003/XInclude">
""",
)
self.writeInfo(maker, pretty=True)
if not isdir(self._resDir):
os.mkdir(self._resDir)
pretty = True
while True:
entry = yield
if entry is None:
break
if entry.isData():
self.writeDataEntry(maker, entry)
continue
entry.detectDefiFormat()
article = maker.article(
maker.key(entry.l_word[0]),
)
for alt in entry.l_word[1:]:
article.append(maker.synonym(alt))
article.append(
maker.definition(
ET.CDATA(entry.defi),
type=entry.defiFormat,
),
)
ET.indent(article, space="")
articleStr = cast(
bytes,
ET.tostring(
article,
pretty_print=pretty,
encoding=encoding,
),
).decode(encoding)
# for some reason, "´k" becomes " ́k" (for example) # noqa: RUF003
# stardict-text2bin tool also does this.
# https://en.wiktionary.org/wiki/%CB%88#Translingual
self._file.write(articleStr + "\n")
_file.write("</stardict>")
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
| 9,605 | Python | .py | 343 | 24.443149 | 70 | 0.670109 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,841 | babylon_bdc.py | ilius_pyglossary/pyglossary/plugins/babylon_bdc.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyglossary.option import Option
__all__ = [
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = False
lname = "babylon_bdc"
format = "BabylonBdc"
description = "Babylon (bdc)"
extensions = (".bdc",)
extensionCreate = ""
singleFile = True
kind = "binary"
wiki = ""
website = None
optionsProp: dict[str, Option] = {}
| 539 | Python | .py | 29 | 17 | 37 | 0.706931 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,842 | dict_cc_split.py | ilius_pyglossary/pyglossary/plugins/dict_cc_split.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import html
from collections.abc import Iterator
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import sqlite3
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
from pyglossary.core import log
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dict_cc_split"
format = "Dictcc_split"
description = "Dict.cc (SQLite3) - Split"
extensions = ()
extensionCreate = ".db"
singleFile = True
kind = "binary"
wiki = "https://en.wikipedia.org/wiki/Dict.cc"
website = (
"https://play.google.com/store/apps/details?id=cc.dict.dictcc",
"dict.cc dictionary - Google Play",
)
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
self._con = connect(filename)
self._cur = self._con.cursor()
self._glos.setDefaultDefiFormat("m")
def __len__(self) -> int:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute("select count(*) * 2 from main_ft")
return self._cur.fetchone()[0]
def iterRows(
self,
column1: str,
column2: str,
) -> Iterator[tuple[str, str, str]]:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute(
f"select {column1}, {column2}, entry_type from main_ft"
f" order by {column1}",
)
for row in self._cur.fetchall():
term1 = row[0]
term2 = row[1]
try:
term1 = html.unescape(term1)
except Exception as e:
log.error(f"html.unescape({term1!r}) -> {e}")
try:
term2 = html.unescape(term2)
except Exception as e:
log.error(f"html.unescape({term2!r}) -> {e}")
yield term1, term2, row[2]
def _iterOneDirection(
self,
column1: str,
column2: str,
) -> Iterator[EntryType]:
for word, defi, entry_type in self.iterRows(column1, column2):
if entry_type:
word = f"{word} {{{entry_type}}}" # noqa: PLW2901
yield self._glos.newEntry(word, defi, defiFormat="m")
def __iter__(self) -> Iterator[EntryType]:
yield from self._iterOneDirection("term1", "term2")
yield from self._iterOneDirection("term2", "term1")
def close(self) -> None:
if self._cur:
self._cur.close()
if self._con:
self._con.close()
self._clear()
| 2,641 | Python | .py | 98 | 24.255102 | 64 | 0.686981 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,843 | ebook_kobo_dictfile.py | ilius_pyglossary/pyglossary/plugins/ebook_kobo_dictfile.py |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright © 2020-2021 Saeed Rasooli <saeed.gnu@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import os
from collections.abc import Generator
from os.path import isdir
from typing import TYPE_CHECKING
from pyglossary.core import exc_note, log, pip
from pyglossary.image_utils import extractInlineHtmlImages
from pyglossary.io_utils import nullTextIO
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
from pyglossary.text_reader import TextGlossaryReader
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "kobo_dictfile"
format = "Dictfile"
description = "Kobo E-Reader Dictfile (.df)"
extensions = (".df",)
extensionCreate = ".df"
singleFile = True
kind = "text"
wiki = ""
website = (
"https://pgaskin.net/dictutil/dictgen/#dictfile-format",
"dictgen - dictutil",
)
# https://github.com/pgaskin/dictutil
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"extract_inline_images": BoolOption(comment="Extract inline images"),
}
def fixWord(word: str) -> str:
return word.replace("\n", " ")
def escapeDefi(defi: str) -> str:
return defi.replace("\n@", "\n @").replace("\n:", "\n :").replace("\n&", "\n &")
class Reader(TextGlossaryReader):
depends = {
"mistune": "mistune==3.0.1",
}
_extract_inline_images: bool = True
def __init__(self, glos: GlossaryType) -> None:
TextGlossaryReader.__init__(self, glos, hasInfo=False)
def open(self, filename: str) -> None:
try:
import mistune # type: ignore # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install mistune` to install")
raise
TextGlossaryReader.open(self, filename)
self._glos.setDefaultDefiFormat("h")
@classmethod
def isInfoWord(cls, _word: str) -> bool:
return False
@classmethod
def fixInfoWord(cls, _word: str) -> str:
raise NotImplementedError
def fixDefi(
self,
defi: str,
html: bool,
) -> tuple[str, list[tuple[str, str]] | None]:
import mistune
defi = (
defi.replace("\n @", "\n@")
.replace("\n :", "\n:")
.replace("\n &", "\n&")
.replace("</p><br />", "</p>")
.replace("</p><br/>", "</p>")
.replace("</p></br>", "</p>")
)
defi = defi.strip()
if html:
pass
else:
defi = mistune.html(defi)
images: "list[tuple[str, str]] | None" = None
if self._extract_inline_images:
defi, images = extractInlineHtmlImages(
defi,
self._glos.tmpDataDir,
fnamePrefix="", # maybe f"{self._pos:06d}-"
)
return defi, images
def nextBlock(
self,
) -> tuple[list[str], str, list[tuple[str, str]] | None]:
words: list[str] = []
defiLines: list[str] = []
html = False
while True:
line = self.readline()
if not line:
break
line = line.rstrip("\n\r")
if line.startswith("@"):
if words:
self._bufferLine = line
defi, images = self.fixDefi("\n".join(defiLines), html=html)
return words, defi, images
words = [line[1:].strip()]
continue
if line.startswith(": "):
defiLines.append(line[2:])
continue
if line.startswith("::"):
continue
if line.startswith("&"):
words.append(line[1:].strip())
continue
if line.startswith("<html>"):
line = line[6:]
html = True
defiLines.append(line)
if words:
defi, images = self.fixDefi("\n".join(defiLines), html=html)
return words, defi, images
raise StopIteration
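# Hedged example of one Dictfile block handled above:
# @ headword
# & variant
# : plain definition line
# more markdown text
# parses to words == ["headword", "variant"]; the definition is rendered from Markdown
# via mistune unless a line introduced it with "<html>".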
class Writer:
_encoding: str = "utf-8"
@staticmethod
def stripFullHtmlError(entry: EntryType, error: str) -> None:
log.error(f"error in stripFullHtml: {error}, words={entry.l_word!r}")
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._file: "io.TextIOBase" = nullTextIO
glos.stripFullHtml(errorHandler=self.stripFullHtmlError)
def finish(self) -> None:
self._file.close()
if not os.listdir(self._resDir):
os.rmdir(self._resDir)
def open(self, filename: str) -> None:
self._file = open(filename, "w", encoding=self._encoding)
# dictgen's ParseDictFile does not seem to support glossary info / metadata
self._resDir = filename + "_res"
if not isdir(self._resDir):
os.mkdir(self._resDir)
def write(
self,
) -> Generator[None, EntryType, None]:
fileObj = self._file
resDir = self._resDir
while True:
entry = yield
if entry is None:
break
if entry.isData():
entry.save(resDir)
continue
words = entry.l_word
defi = entry.defi
entry.detectDefiFormat()
if entry.defiFormat == "h":
defi = f"<html>{entry.defi}"
fileObj.write(f"@ {fixWord(words[0])}\n")
for alt in words[1:]:
fileObj.write(f"& {fixWord(alt)}\n")
fileObj.write(f"{escapeDefi(defi)}\n\n")
| 5,944 | Python | .py | 194 | 27.78866 | 81 | 0.700717 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,844 | dikt_json.py | ilius_pyglossary/pyglossary/plugins/dikt_json.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
# from https://github.com/maxim-saplin/pyglossary
from __future__ import annotations
import re
from collections.abc import Generator
from typing import TYPE_CHECKING
from pyglossary.compression import (
# compressionOpen,
stdCompressions,
)
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dikt_json"
format = "DiktJson"
description = "DIKT JSON (.json)"
extensions = ()
extensionCreate = ".json"
singleFile = True
kind = "text"
wiki = ""
website = "https://github.com/maxim-saplin/dikt"
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"enable_info": BoolOption(comment="Enable glossary info / metedata"),
"resources": BoolOption(comment="Enable resources / data files"),
"word_title": BoolOption(
comment="add headwords title to beginning of definition",
),
}
class Writer:
_encoding: str = "utf-8"
_enable_info: bool = True
_resources: bool = True
_word_title: bool = False
compressions = stdCompressions
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = None
glos.preventDuplicateWords()
def open(self, filename: str) -> None:
self._filename = filename
def finish(self) -> None:
self._filename = None
def write(self) -> Generator[None, EntryType, None]:
from json import dumps
from pyglossary.text_writer import writeTxt
glos = self._glos
encoding = self._encoding
enable_info = self._enable_info
resources = self._resources
ensure_ascii = encoding == "ascii"
def escape(st: str) -> str:
# remove styling from HTML tags
st2 = re.sub(r' style="[^"]*"', "", st)
st2 = re.sub(r' class="[^"]*"', "", st2)
st2 = re.sub(r"<font [^>]*>", "", st2)
st2 = re.sub(r"</font>", "", st2)
st2 = re.sub(r"\n", "", st2)
st2 = re.sub(r"<div></div>", "", st2)
st2 = re.sub(r"<span></span>", "", st2)
# fix russian dictionary issues,
# such as hyphenation in word (e.g. абб{[']}а{[/']}т)
st2 = re.sub(r"\{\['\]\}", "", st2)
st2 = re.sub(r"\{\[/'\]\}", "", st2)
return dumps(st2, ensure_ascii=ensure_ascii)
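# e.g. '<font color="red"><span style="x">word</span></font>' collapses to
# '<span>word</span>' before being JSON-encoded; styling attributes and font tags are dropped.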
yield from writeTxt(
glos,
entryFmt="\t{word}: {defi},\n",
filename=self._filename,
encoding=encoding,
writeInfo=enable_info,
wordEscapeFunc=escape,
defiEscapeFunc=escape,
ext=".json",
head="{\n",
tail='\t"": ""\n}',
resources=resources,
word_title=self._word_title,
)
| 2,699 | Python | .py | 100 | 24.37 | 70 | 0.676242 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,845 | gettext_po.py | ilius_pyglossary/pyglossary/plugins/gettext_po.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import os
from collections.abc import Generator, Iterator
from os.path import isdir
from typing import TYPE_CHECKING
from pyglossary.core import exc_note, log, pip
from pyglossary.io_utils import nullTextIO
from pyglossary.option import (
BoolOption,
Option,
)
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "gettext_po"
format = "GettextPo"
description = "Gettext Source (.po)"
extensions = (".po",)
extensionCreate = ".po"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/Gettext"
website = (
"https://www.gnu.org/software/gettext",
"gettext - GNU Project",
)
optionsProp: "dict[str, Option]" = {
"resources": BoolOption(comment="Enable resources / data files"),
}
class Reader:
depends = {
"polib": "polib",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self.clear()
def clear(self) -> None:
self._filename = ""
self._file: "io.TextIOBase" = nullTextIO
self._wordCount: "int | None" = None
self._resDir = ""
self._resFileNames: list[str] = []
def open(self, filename: str) -> None:
self._filename = filename
self._file = open(filename, encoding="utf-8")
self._resDir = filename + "_res"
if isdir(self._resDir):
self._resFileNames = os.listdir(self._resDir)
else:
self._resDir = ""
self._resFileNames = []
def close(self) -> None:
self._file.close()
self._file = nullTextIO
self.clear()
def __len__(self) -> int:
from pyglossary.file_utils import fileCountLines
if self._wordCount is None:
log.debug("Try not to use len(reader) as it takes extra time")
self._wordCount = fileCountLines(
self._filename,
newline=b"\nmsgid",
)
return self._wordCount
def __iter__(self) -> Iterator[EntryType]: # noqa: PLR0912
try:
from polib import unescape as po_unescape
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install polib` to install")
raise
_file = self._file
word = ""
defi = ""
msgstr = False
wordCount = 0
for line in _file:
line = line.strip() # noqa: PLW2901
if not line:
continue
if line.startswith("#"):
continue
if line.startswith("msgid "):
if word:
yield self._glos.newEntry(word, defi)
wordCount += 1
word = ""
defi = ""
else:
pass
# TODO: parse defi and set glos info?
# but this should be done in self.open
word = po_unescape(line[6:])
msgstr = False
elif line.startswith("msgstr "):
if msgstr:
log.error("msgid omitted!")
defi = po_unescape(line[7:])
msgstr = True
elif msgstr:
defi += po_unescape(line)
else:
word += po_unescape(line)
if word:
yield self._glos.newEntry(word, defi)
wordCount += 1
self._wordCount = wordCount
class Writer:
depends = {
"polib": "polib",
}
_resources: bool = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: "io.TextIOBase" = nullTextIO
def open(self, filename: str) -> None:
self._filename = filename
self._file = _file = open(filename, mode="w", encoding="utf-8")
_file.write('#\nmsgid ""\nmsgstr ""\n')
for key, value in self._glos.iterInfo():
_file.write(f'"{key}: {value}\\n"\n')
def finish(self) -> None:
self._filename = ""
self._file.close()
self._file = nullTextIO
def write(self) -> Generator[None, EntryType, None]:
try:
from polib import escape as po_escape
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install polib` to install")
raise
_file = self._file
resources = self._resources
filename = self._filename
while True:
entry = yield
if entry is None:
break
if entry.isData():
if resources:
entry.save(filename + "_res")
continue
_file.write(
f"msgid {po_escape(entry.s_word)}\n"
f"msgstr {po_escape(entry.defi)}\n\n",
)
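# Rough sketch of the output shape (illustrative, not from the original
# source): open() writes the PO header block plus one '"key: value\n"' line
# per glossary info item, then write() appends a
#   msgid <escaped headword>
#   msgstr <escaped definition>
# pair per entry; data entries are instead saved under "<filename>_res"
# when the resources option is enabled.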
| 4,143 | Python | .py | 163 | 22.257669 | 66 | 0.672654 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,846 | ebook_mobi.py | ilius_pyglossary/pyglossary/plugins/ebook_mobi.py |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright © 2012-2016 Alberto Pettarin (alberto@albertopettarin.it)
# Copyright © 2016-2022 Saeed Rasooli <saeed.gnu@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import os
from collections.abc import Generator
from datetime import datetime
from os.path import join
from typing import TYPE_CHECKING
from pyglossary.core import log
from pyglossary.ebook_base import EbookWriter
from pyglossary.flags import DEFAULT_YES
from pyglossary.langs import Lang
from pyglossary.option import (
BoolOption,
FileSizeOption,
IntOption,
Option,
StrOption,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "mobi"
format = "Mobi"
description = "Mobipocket (.mobi) E-Book"
extensions = (".mobi",)
extensionCreate = ".mobi"
singleFile = False
sortOnWrite = DEFAULT_YES
sortKeyName = "ebook"
kind = "package"
wiki = "https://en.wikipedia.org/wiki/Mobipocket"
website = None
optionsProp: "dict[str, Option]" = {
"group_by_prefix_length": IntOption(
comment="Prefix length for grouping",
),
# "group_by_prefix_merge_min_size": IntOption(),
# "group_by_prefix_merge_across_first": BoolOption(),
# specific to mobi
"kindlegen_path": StrOption(
comment="Path to kindlegen executable",
),
"compress": BoolOption(
disabled=True,
comment="Enable compression",
),
"keep": BoolOption(
comment="Keep temp files",
),
"include_index_page": BoolOption(
disabled=True,
comment="Include index page",
),
"css": StrOption(
# disabled=True,
comment="Path to css file",
),
"cover_path": StrOption(
# disabled=True,
comment="Path to cover file",
),
"file_size_approx": FileSizeOption(
comment="Approximate size of each xhtml file (example: 200kb)",
),
"hide_word_index": BoolOption(
comment="Hide headword in tap-to-check interface",
),
"spellcheck": BoolOption(
comment="Enable wildcard search and spell correction during word lookup",
# "Maybe it just enables the kindlegen's spellcheck."
),
"exact": BoolOption(
comment="Exact-match Parameter",
# "I guess it only works for inflections"
),
}
extraDocs = [
(
"Other Requirements",
"Install [KindleGen](https://wiki.mobileread.com/wiki/KindleGen)"
" for creating Mobipocket e-books.",
),
]
class GroupStateBySize:
def __init__(self, writer: Writer) -> None:
self.writer = writer
self.group_index = -1
self.reset()
def reset(self) -> None:
self.group_contents: list[str] = []
self.group_size = 0
def add(self, entry: EntryType) -> None:
defi = entry.defi
content = self.writer.format_group_content(
entry.l_word[0],
defi,
variants=entry.l_word[1:],
)
self.group_contents.append(content)
self.group_size += len(content.encode("utf-8"))
class Writer(EbookWriter):
_compress: bool = False
_keep: bool = False
_kindlegen_path: str = ""
_file_size_approx: int = 271360
_hide_word_index: bool = False
_spellcheck: bool = True
_exact: bool = False
CSS_CONTENTS = b"""@charset "UTF-8";"""
GROUP_XHTML_TEMPLATE = """<?xml version="1.0" encoding="utf-8" \
standalone="no"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" \
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns:cx=\
"https://kindlegen.s3.amazonaws.com/AmazonKindlePublishingGuidelines.pdf" \
xmlns:dc="http://purl.org/dc/elements/1.1/" \
xmlns:idx="https://kindlegen.s3.amazonaws.com\
/AmazonKindlePublishingGuidelines.pdf" \
xmlns:math="http://exslt.org/math" \
xmlns:mbp="https://kindlegen.s3.amazonaws.com\
/AmazonKindlePublishingGuidelines.pdf" \
xmlns:mmc="https://kindlegen.s3.amazonaws.com\
/AmazonKindlePublishingGuidelines.pdf" \
xmlns:saxon="http://saxon.sf.net/" xmlns:svg="http://www.w3.org/2000/svg" \
xmlns:tl="https://kindlegen.s3.amazonaws.com\
/AmazonKindlePublishingGuidelines.pdf" \
xmlns:xs="http://www.w3.org/2001/XMLSchema" \
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type" />
<link href="style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<mbp:frameset>
{group_contents}
</mbp:frameset>
</body>
</html>"""
GROUP_XHTML_WORD_DEFINITION_TEMPLATE = """<idx:entry \
scriptable="yes"{spellcheck_str}>
<idx:orth{value_headword}>{headword_visible}{infl}
</idx:orth>
<br/>{definition}
</idx:entry>
<hr/>"""
GROUP_XHTML_WORD_INFL_TEMPLATE = """<idx:infl>
{iforms_str}
</idx:infl>"""
GROUP_XHTML_WORD_IFORM_TEMPLATE = """<idx:iform \
value="{inflword}"{exact_str} />"""
OPF_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<package unique-identifier="uid">
<metadata>
<dc-metadata xmlns:dc="http://purl.org/metadata/dublin_core"
xmlns:oebpackage="http://openebook.org/namespaces/oeb-package/1.0/">
<dc:Title>{title}</dc:Title>
<dc:Language>{sourceLang}</dc:Language>
<dc:Identifier id="uid">{identifier}</dc:Identifier>
<dc:Creator>{creator}</dc:Creator>
<dc:Rights>{copyright}</dc:Rights>
<dc:description>{description}</dc:description>
<dc:Subject BASICCode="REF008000">Dictionaries</dc:Subject>
</dc-metadata>
<x-metadata>
<output encoding="utf-8"></output>
<DictionaryInLanguage>{sourceLang}</DictionaryInLanguage>
<DictionaryOutLanguage>{targetLang}</DictionaryOutLanguage>
<EmbeddedCover>{cover}</EmbeddedCover>
</x-metadata>
</metadata>
<manifest>
{manifest}
</manifest>
<spine>
{spine}
</spine>
<tours></tours>
<guide></guide>
</package>"""
def __init__(self, glos: GlossaryType) -> None:
import uuid
EbookWriter.__init__(
self,
glos,
)
glos.setInfo("uuid", str(uuid.uuid4()).replace("-", ""))
# FIXME: check if full html pages/documents as entry do work
# glos.stripFullHtml(errorHandler=None)
def get_prefix(self, word: str) -> str:
if not word:
return ""
length = self._group_by_prefix_length
prefix = word[:length].lower()
if prefix[0] < "a":
return "SPECIAL"
return prefix
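# Illustration only (assuming the default group_by_prefix_length of 2
# inherited from EbookWriter):
#   get_prefix("Apple") -> "ap"
#   get_prefix("1984")  -> "SPECIAL"  # first character sorts before "a"
#   get_prefix("")      -> ""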
def format_group_content(
self,
word: str,
defi: str,
variants: "list[str] | None" = None,
) -> str:
hide_word_index = self._hide_word_index
infl = ""
if variants:
iforms_list = [
self.GROUP_XHTML_WORD_IFORM_TEMPLATE.format(
inflword=variant,
exact_str=' exact="yes"' if self._exact else "",
)
for variant in variants
]
infl = "\n" + self.GROUP_XHTML_WORD_INFL_TEMPLATE.format(
iforms_str="\n".join(iforms_list),
)
headword = self.escape_if_needed(word)
defi = self.escape_if_needed(defi)
if hide_word_index:
headword_visible = ""
value_headword = f' value="{headword}"'
else:
headword_visible = "\n" + self._glos.wordTitleStr(headword)
value_headword = ""
return self.GROUP_XHTML_WORD_DEFINITION_TEMPLATE.format(
spellcheck_str=' spell="yes"' if self._spellcheck else "",
headword_visible=headword_visible,
value_headword=value_headword,
definition=defi,
infl=infl,
)
@staticmethod
def getLangCode(lang: "Lang | None") -> str:
return lang.code if isinstance(lang, Lang) else ""
def get_opf_contents(
self,
manifest_contents: str,
spine_contents: str,
) -> bytes:
cover = ""
if self.cover:
cover = self.COVER_TEMPLATE.format(cover=self.cover)
creationDate = datetime.now().strftime("%Y-%m-%d")
return self.OPF_TEMPLATE.format(
identifier=self._glos.getInfo("uuid"),
# use Language code instead name for kindlegen
sourceLang=self.getLangCode(self._glos.sourceLang),
targetLang=self.getLangCode(self._glos.targetLang),
title=self._glos.getInfo("name"),
creator=self._glos.author,
copyright=self._glos.getInfo("copyright"),
description=self._glos.getInfo("description"),
creationDate=creationDate,
cover=cover,
manifest=manifest_contents,
spine=spine_contents,
).encode("utf-8")
def write_groups(self) -> Generator[None, EntryType, None]:
def add_group(state: GroupStateBySize) -> None:
if state.group_size <= 0:
return
state.group_index += 1
index = state.group_index + self.GROUP_START_INDEX
group_xhtml_path = self.get_group_xhtml_file_name_from_index(index)
self.add_file_manifest(
"OEBPS/" + group_xhtml_path,
group_xhtml_path,
self.GROUP_XHTML_TEMPLATE.format(
group_contents=self.GROUP_XHTML_WORD_DEFINITION_JOINER.join(
state.group_contents,
),
).encode("utf-8"),
"application/xhtml+xml",
)
state = GroupStateBySize(self)
while True:
entry = yield
if entry is None:
break
if entry.isData():
continue
if state.group_size >= self._file_size_approx:
add_group(state)
state.reset()
state.add(entry)
add_group(state)
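# Illustrative behaviour of the grouping above: entries are appended to the
# current group until its UTF-8 encoded size reaches file_size_approx
# (default 271360 bytes, about 265 KiB), at which point the group is written
# out as its own xhtml file in the OEBPS manifest and a fresh group starts.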
def write(self) -> Generator[None, EntryType, None]:
import shutil
import subprocess
filename = self._filename
kindlegen_path = self._kindlegen_path
yield from EbookWriter.write(self)
# download kindlegen from this page:
# https://www.amazon.com/gp/feature.html?ie=UTF8&docId=1000765211
# run kindlegen
if not kindlegen_path:
kindlegen_path = shutil.which("kindlegen") or ""
if not kindlegen_path:
log.warning(
f"Not running kindlegen, the raw files are located in {filename}",
)
log.warning(
"Provide KindleGen path with: --write-options 'kindlegen_path=...'",
)
return
# name = self._glos.getInfo("name")
log.info(f"Creating .mobi file with kindlegen, using {kindlegen_path!r}")
opf_path_abs = join(filename, "OEBPS", "content.opf")
proc = subprocess.Popen(
[kindlegen_path, opf_path_abs, "-gen_ff_mobi7", "-o", "content.mobi"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = proc.communicate()
log.info(output[0].decode("utf-8"))
mobi_path_abs = os.path.join(filename, "OEBPS", "content.mobi")
log.info(f"Created .mobi file with kindlegen: {mobi_path_abs}")
| 10,932 | Python | .py | 349 | 28.7851 | 79 | 0.726497 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,847 | wordset.py | ilius_pyglossary/pyglossary/plugins/wordset.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
from collections.abc import Iterator
from json import load
from os import listdir
from os.path import isfile, join, splitext
from typing import TYPE_CHECKING
from pyglossary.core import log
from pyglossary.option import (
EncodingOption,
Option,
)
from pyglossary.sort_keys import lookupSortKey
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "wordset"
format = "Wordset"
description = "Wordset.org JSON directory"
extensions = ()
extensionCreate = "-wordset/"
singleFile = False
kind = "directory"
wiki = ""
website = (
"https://github.com/wordset/wordset-dictionary",
"@wordset/wordset-dictionary",
)
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
}
class Reader:
_encoding: str = "utf-8"
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
self.defiTemplate = (
"<p>"
'<font color="green">{speech_part}</font>'
"<br>"
"{def}"
"<br>"
"<i>{example}</i>"
"</p>"
)
"""
{
"id": "492099d426",
"def": "without musical accompaniment",
"example": "they performed a cappella",
"speech_part": "adverb"
},
"""
def close(self) -> None:
self._clear()
def _clear(self) -> None:
self._filename = ""
def open(self, filename: str) -> None:
self._filename = filename
name = self._glos.getInfo("name")
if not name or name == "data":
self._glos.setInfo("name", "Wordset.org")
self._glos.setDefaultDefiFormat("h")
def __len__(self) -> int:
return 0
@staticmethod
def fileNameSortKey(fname: str) -> str:
fname = splitext(fname)[0]
if fname == "misc":
return "\x80"
return fname
def __iter__(self) -> Iterator[EntryType]:
if not self._filename:
raise RuntimeError("iterating over a reader while it's not open")
direc = self._filename
encoding = self._encoding
glos = self._glos
for fname in sorted(listdir(direc), key=self.fileNameSortKey):
fpath = join(direc, fname)
if not (fname.endswith(".json") and isfile(fpath)):
continue
with open(fpath, encoding=encoding) as fileObj:
data = load(fileObj)
words = list(data)
namedSortKey = lookupSortKey("headword_lower")
if namedSortKey is None:
raise RuntimeError("namedSortKey is None")
sortKey = namedSortKey.normal("utf-8")
words.sort(key=sortKey)
for word in words:
entryDict = data[word]
defi = "".join(
self.defiTemplate.format(
**{
"word": word,
"def": meaning.get("def", ""),
"example": meaning.get("example", ""),
"speech_part": meaning.get("speech_part", ""),
},
)
for meaning in entryDict.get("meanings", [])
)
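# For illustration: the sample meaning quoted in the class docstring above
# renders through defiTemplate as
# <p><font color="green">adverb</font><br>without musical accompaniment<br>
# <i>they performed a cappella</i></p>
# and one such block is concatenated per meaning of the headword.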
yield glos.newEntry(word, defi, defiFormat="h")
log.info(f"finished reading {fname}")
| 3,012 | Python | .py | 118 | 22.29661 | 68 | 0.671761 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,848 | crawler_dir.py | ilius_pyglossary/pyglossary/plugins/crawler_dir.py |
# mypy: ignore-errors
from __future__ import annotations
from collections.abc import Generator, Iterator
from hashlib import sha1
from os import listdir, makedirs
from os.path import dirname, isdir, isfile, join, splitext
from typing import TYPE_CHECKING
from pyglossary.compression import (
compressionOpenFunc,
)
from pyglossary.core import log
from pyglossary.option import (
Option,
StrOption,
)
from pyglossary.text_utils import (
escapeNTB,
splitByBarUnescapeNTB,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "crawler_dir"
format = "CrawlerDir"
description = "Crawler Directory"
extensions = (".crawler",)
extensionCreate = ".crawler/"
singleFile = True
kind = "directory"
wiki = ""
website = None
optionsProp: "dict[str, Option]" = {
"compression": StrOption(
values=["", "gz", "bz2", "lzma"],
comment="Compression Algorithm",
),
}
class Writer:
_compression: str = ""
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = None
def finish(self) -> None:
pass
def open(self, filename: str) -> None:
self._filename = filename
if not isdir(filename):
makedirs(filename)
@staticmethod
def filePathFromWord(b_word: bytes) -> str:
bw = b_word.lower()
if len(bw) <= 2:
return bw.hex()
if len(bw) <= 4:
return join(
bw[:2].hex() + ".d",
bw[2:].hex(),
)
return join(
bw[:2].hex() + ".d",
bw[2:4].hex() + ".d",
bw[4:8].hex() + "-" + sha1(b_word).hexdigest()[:8], # noqa: S324
)
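# Illustrative mapping (hex of the lowercased headword, following the
# branches above; separators shown POSIX-style):
#   filePathFromWord(b"go")   -> "676f"
#   filePathFromWord(b"word") -> "776f.d/7264"
# longer words get a third level suffixed with a short sha1 digest to keep
# file names unique.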
def write(self) -> None:
from collections import OrderedDict as odict
from pyglossary.json_utils import dataToPrettyJson
filename = self._filename
wordCount = 0
compression = self._compression
c_open = compressionOpenFunc(compression)
if not c_open:
raise ValueError(f"invalid compression {compression!r}")
while True:
entry = yield
if entry is None:
break
if entry.isData():
continue
fpath = join(filename, self.filePathFromWord(entry.b_word))
if compression:
fpath = f"{fpath}.{compression}"
parentDir = dirname(fpath)
if not isdir(parentDir):
makedirs(parentDir)
if isfile(fpath):
log.warning(f"file exists: {fpath}")
fpath += f"-{sha1(entry.b_defi).hexdigest()[:4]}" # noqa: S324
with c_open(fpath, "wt", encoding="utf-8") as _file:
_file.write(
f"{escapeNTB(entry.s_word)}\n{entry.defi}",
)
wordCount += 1
with open(
join(filename, "info.json"),
mode="w",
encoding="utf-8",
) as infoFile:
info = odict()
info["name"] = self._glos.getInfo("name")
info["wordCount"] = wordCount
for key, value in self._glos.getExtraInfos(
(
"name",
"wordCount",
),
).items():
info[key] = value
infoFile.write(dataToPrettyJson(info))
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = None
self._wordCount = 0
def open(self, filename: str) -> None:
from pyglossary.json_utils import jsonToOrderedData
self._filename = filename
with open(join(filename, "info.json"), encoding="utf-8") as infoFp:
info = jsonToOrderedData(infoFp.read())
self._wordCount = info.pop("wordCount")
for key, value in info.items():
self._glos.setInfo(key, value)
def close(self) -> None:
pass
def __len__(self) -> int:
return self._wordCount
def _fromFile(self, fpath: str) -> EntryType:
_, ext = splitext(fpath)
c_open = compressionOpenFunc(ext.lstrip("."))
if not c_open:
log.error(f"invalid extension {ext}")
c_open = open
with c_open(fpath, "rt", encoding="utf-8") as _file:
words = splitByBarUnescapeNTB(_file.readline().rstrip("\n"))
defi = _file.read()
return self._glos.newEntry(words, defi)
@staticmethod
def _listdirSortKey(name: str) -> str:
name_nox, ext = splitext(name)
if ext == ".d":
return name
return name_nox
def _readDir(
self,
dpath: str,
exclude: "set[str] | None",
) -> Generator[EntryType, None, None]:
children = listdir(dpath)
if exclude:
children = [name for name in children if name not in exclude]
children.sort(key=self._listdirSortKey)
for name in children:
cpath = join(dpath, name)
if isfile(cpath):
yield self._fromFile(cpath)
continue
if isdir(cpath):
yield from self._readDir(cpath, None)
continue
log.error(f"Not a file nor a directory: {cpath}")
def __iter__(self) -> Iterator[EntryType]:
yield from self._readDir(
self._filename,
{
"info.json",
},
)
| 4,704 | Python | .py | 181 | 22.906077 | 69 | 0.684831 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,849 | cc_kedict.py | ilius_pyglossary/pyglossary/plugins/cc_kedict.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
from __future__ import annotations
from collections.abc import Callable, Iterator
from io import BytesIO
from os.path import isdir, join
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import lxml
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
from pyglossary.core import exc_note, log, pip
from pyglossary.text_reader import TextGlossaryReader
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "cc_kedict"
format = "cc-kedict"
description = "cc-kedict"
extensions = ()
extensionCreate = ""
singleFile = True
kind = "text"
wiki = ""
website = (
"https://github.com/mhagiwara/cc-kedict",
"@mhagiwara/cc-kedict",
)
optionsProp: "dict[str, Option]" = {}
class YamlReader(TextGlossaryReader):
tagStyle = (
"color:white;"
"background:green;"
"padding-left:3px;"
"padding-right:3px;"
"border-radius:0.5ex;"
# 0.5ex ~= 0.3em, but "ex" is recommended
)
def __init__( # noqa: PLR0913
self,
glos: GlossaryType,
spellKey: str = "",
posKey: str = "",
synsKey: str = "",
tagsKey: str = "",
) -> None:
TextGlossaryReader.__init__(self, glos)
self._spellKey = spellKey
self._posKey = posKey
self._synsKey = synsKey
self._tagsKey = tagsKey
self._posMapping = {
"n": "noun",
"v": "verb",
"a": "adjective",
"pron": "pronoun",
"propn": "proper noun",
"intj": "interjection",
"det": "determiner",
"part": "particle",
"adv": "adverb",
"num": "number",
"abbrev": "abbreviation",
"suf": "suffix",
"pref": "prefix",
}
@classmethod
def isInfoWord(cls, _word: str) -> bool:
return False
@classmethod
def fixInfoWord(cls, _word: str) -> str:
return ""
@staticmethod
def _makeList(
hf: "lxml.etree.htmlfile",
input_objects: list[Any],
processor: Callable,
single_prefix: "str | None" = None,
skip_single: bool = True,
) -> None:
"""Wrap elements into <ol> if more than one element."""
if not input_objects:
return
if skip_single and len(input_objects) == 1:
# if single_prefix is None:
# single_prefix = ET.Element("br")
if single_prefix:
hf.write(single_prefix)
processor(hf, input_objects[0], 1)
return
with hf.element("ol"):
for el in input_objects:
with hf.element("li"):
processor(hf, el, len(input_objects))
def _processExample( # noqa: PLR6301
self,
hf: "lxml.etree.htmlfile",
exampleDict: dict,
_count: int,
) -> None:
from lxml import etree as ET
if not exampleDict.get("example"):
log.error(f"invalid example: {exampleDict}")
return
hf.write(exampleDict["example"])
transliteration = exampleDict.get("transliteration")
if transliteration:
hf.write(ET.Element("br"))
with hf.element("font", color="green"):
hf.write(f"{transliteration}")
translation = exampleDict.get("translation")
if translation:
hf.write(ET.Element("br"))
with hf.element("i"):
hf.write(f"{translation}")
def _processDef(
self,
hf: "lxml.etree.htmlfile",
defDict: dict,
count: int,
) -> None:
from lxml import etree as ET
text = defDict.get("def", "")
if text:
hf.write(text)
examples = defDict.get("examples")
if examples:
if text:
if count == 1:
hf.write(ET.Element("br"))
hf.write(ET.Element("br"))
with hf.element("i"):
hf.write("Examples:")
self._makeList(
hf,
examples,
self._processExample,
skip_single=False,
)
def _processNote( # noqa: PLR6301
self,
hf: "lxml.etree.htmlfile",
note: str,
_count: int,
) -> None:
hf.write(note)
def _processEntry(
self,
hf: "lxml.etree.htmlfile",
edict: dict,
) -> None:
from lxml import etree as ET
if self._spellKey and self._spellKey in edict:
spelling = edict[self._spellKey]
if not isinstance(spelling, str):
log.error(f"{spelling=}, {type(spelling)=}, {edict=}")
# https://github.com/mhagiwara/cc-kedict/pull/1
spelling = "on" if spelling is True else ""
if spelling:
with hf.element("font", color="green"):
hf.write(spelling)
hf.write(ET.Element("br"))
if self._posKey and self._posKey in edict:
pos = edict[self._posKey]
pos = self._posMapping.get(pos, pos)
with hf.element("i"):
hf.write(pos.capitalize())
hf.write(ET.Element("br"))
if self._tagsKey and self._tagsKey in edict:
tags = edict[self._tagsKey]
for i, tag in enumerate(tags):
if i > 0:
hf.write(" ")
with hf.element("span", style=self.tagStyle):
hf.write(tag)
hf.write(ET.Element("br"))
defs = edict.get("defs")
if defs:
self._makeList(
hf,
defs,
self._processDef,
)
if self._synsKey and self._synsKey in edict:
hf.write("Synonyms: ")
for i, word in enumerate(edict[self._synsKey]):
if i > 0:
with hf.element("big"):
hf.write(" | ") # NESTED: 5
with hf.element("a", href=f"bword://{word}"):
hf.write(word)
hf.write(ET.Element("br"))
notes = edict.get("notes")
if notes:
hf.write(ET.Element("br"))
hf.write("Notes:")
self._makeList(
hf,
notes,
self._processNote,
skip_single=False,
)
def _createEntry(
self,
yamlBlock: str,
) -> tuple[str, str, None] | None:
from lxml import etree as ET
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
edict = load(yamlBlock, Loader=Loader)
word = edict.get("word")
if not word:
log.error(f"no word in {edict}")
return None
f = BytesIO()
with ET.htmlfile(f, encoding="utf-8") as hf:
with hf.element("div"):
self._processEntry(hf, edict)
defi = f.getvalue().decode("utf-8")
return word, defi, None
def nextBlock(self) -> EntryType:
if not self._file:
raise StopIteration
lines = []
while True:
line = self.readline()
if not line:
break
line = line.rstrip("\n\r")
if not line:
continue
if line.startswith("- "):
line = " " + line[1:]
if lines:
self._bufferLine = line
return self._createEntry("\n".join(lines))
lines.append(line)
if lines:
return self._createEntry("\n".join(lines))
raise StopIteration
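# Sketch of the YAML layout this reader assumes (hypothetical entry, based on
# the keys used above: word, romaja, pos, defs/examples, syns, tags, notes):
#   - word: 가다
#     romaja: gada
#     pos: v
#     defs:
#       - def: to go
#         examples:
#           - example: 학교에 가다
#             translation: to go to school
# nextBlock() treats each line starting with "- " as the start of a new entry
# and feeds the accumulated block to _createEntry().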
class Reader:
depends = {
"yaml": "PyYAML",
"lxml": "lxml",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._yaml = YamlReader(
glos,
spellKey="romaja",
posKey="pos",
synsKey="syns",
tagsKey="tags",
)
def __len__(self) -> int:
return 0
def open(self, filename: str) -> None:
try:
from lxml import etree as ET # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
if isdir(filename):
filename = join(filename, "kedict.yml")
self._filename = filename
self._glos.sourceLangName = "Korean"
self._glos.targetLangName = "English"
self._glos.setDefaultDefiFormat("h")
self._yaml.open(filename)
def close(self) -> None:
self._yaml.close()
def __iter__(self) -> Iterator[EntryType]:
yield from self._yaml
| 7,164 | Python | .py | 288 | 21.451389 | 62 | 0.663736 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,850 | digitalnk.py | ilius_pyglossary/pyglossary/plugins/digitalnk.py |
# -*- coding: utf-8 -*-
from __future__ import annotations
import html
from collections.abc import Iterator
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import sqlite3
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.option import Option
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "digitalnk"
format = "DigitalNK"
description = "DigitalNK (SQLite3, N-Korean)"
extensions = ()
extensionCreate = ".db"
singleFile = True
kind = "binary"
wiki = ""
website = (
"https://github.com/digitalprk/dicrs",
"@digitalprk/dicrs",
)
optionsProp: "dict[str, Option]" = {}
class Reader:
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._clear()
def _clear(self) -> None:
self._filename = ""
self._con: "sqlite3.Connection | None" = None
self._cur: "sqlite3.Cursor | None" = None
def open(self, filename: str) -> None:
from sqlite3 import connect
self._filename = filename
self._con = connect(filename)
self._cur = self._con.cursor()
self._glos.setDefaultDefiFormat("m")
def __len__(self) -> int:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute("select count(*) from dictionary")
return self._cur.fetchone()[0]
def __iter__(self) -> Iterator[EntryType]:
if self._cur is None:
raise ValueError("cur is None")
self._cur.execute(
"select word, definition from dictionary order by word",
)
# iteration over self._cur stops after one entry
# and self._cur.fetchone() returns None
# no idea why!
# https://github.com/ilius/pyglossary/issues/282
# for row in self._cur:
for row in self._cur.fetchall():
word = html.unescape(row[0])
definition = row[1]
yield self._glos.newEntry(word, definition, defiFormat="m")
def close(self) -> None:
if self._cur:
self._cur.close()
if self._con:
self._con.close()
self._clear()
| 2,017 | Python | .py | 77 | 23.766234 | 62 | 0.70109 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,851 | ebook_epub2.py | ilius_pyglossary/pyglossary/plugins/ebook_epub2.py |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright © 2012-2016 Alberto Pettarin (alberto@albertopettarin.it)
# Copyright © 2016-2019 Saeed Rasooli <saeed.gnu@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from pyglossary.ebook_base import EbookWriter
from pyglossary.flags import ALWAYS
from pyglossary.option import (
BoolOption,
IntOption,
Option,
StrOption,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "epub2"
format = "Epub2"
description = "EPUB-2 E-Book"
extensions = (".epub",)
extensionCreate = ".epub"
singleFile = True
sortOnWrite = ALWAYS
sortKeyName = "ebook"
kind = "package"
wiki = "https://en.wikipedia.org/wiki/EPUB"
website = None
# EPUB-3: https://www.w3.org/community/epub3/
optionsProp: "dict[str, Option]" = {
"group_by_prefix_length": IntOption(
comment="Prefix length for grouping",
),
# "group_by_prefix_merge_min_size": IntOption(),
# "group_by_prefix_merge_across_first": BoolOption(),
"compress": BoolOption(
comment="Enable compression",
),
"keep": BoolOption(
comment="Keep temp files",
),
"include_index_page": BoolOption(
comment="Include index page",
),
"css": StrOption(
comment="Path to css file",
),
"cover_path": StrOption(
comment="Path to cover file",
),
}
class Writer(EbookWriter):
# these class attrs are only in Epub
# MIMETYPE_CONTENTS, CONTAINER_XML_CONTENTS
# NCX_TEMPLATE, NCX_NAVPOINT_TEMPLATE
MIMETYPE_CONTENTS = "application/epub+zip"
CONTAINER_XML_CONTENTS = """<?xml version="1.0" encoding="UTF-8" ?>
<container version="1.0"
xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="OEBPS/content.opf"
media-type="application/oebps-package+xml"/>
</rootfiles>
</container>"""
NCX_TEMPLATE = """<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN"
"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1">
<head>
<meta name="dtb:uid" content="{identifier}" />
<meta name="dtb:depth" content="1" />
<meta name="dtb:totalPageCount" content="0" />
<meta name="dtb:maxPageNumber" content="0" />
</head>
<docTitle>
<text>{title}</text>
</docTitle>
<navMap>
{ncx_items}
</navMap>
</ncx>"""
NCX_NAVPOINT_TEMPLATE = """\t<navPoint id="n{index:06d}" playOrder="{index:d}">
<navLabel>
<text>{text}</text>
</navLabel>
<content src="{src}" />
</navPoint>"""
CSS_CONTENTS = b"""@charset "UTF-8";
body {
margin: 10px 25px 10px 25px;
}
h1 {
font-size: 200%;
}
h2 {
font-size: 150%;
}
p {
margin-left: 0em;
margin-right: 0em;
margin-top: 0em;
margin-bottom: 0em;
line-height: 2em;
text-align: justify;
}
a, a:focus, a:active, a:visited {
color: black;
text-decoration: none;
}
body.indexPage {}
h1.indexTitle {}
p.indexGroups {
font-size: 150%;
}
span.indexGroup {}
body.groupPage {}
h1.groupTitle {}
div.groupNavigation {}
span.groupHeadword {}
div.groupEntry {
margin-top: 0;
margin-bottom: 1em;
}
h2.groupHeadword {
margin-left: 5%;
}
p.groupDefinition {
margin-left: 10%;
margin-right: 10%;
}
"""
GROUP_XHTML_TEMPLATE = """<?xml version="1.0" encoding="utf-8" standalone="no"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>{title}</title>
<link rel="stylesheet" type="text/css" href="style.css" />
</head>
<body id="groupPage" class="groupPage">
<h1 class="groupTitle">{group_title}</h1>
<div class="groupNavigation">
<a href="{previous_link}">[ Previous ]</a>
{index_link}
<a href="{next_link}">[ Next ]</a>
</div>
{group_contents}
</body>
</html>"""
GROUP_XHTML_INDEX_LINK = '\t\t<a href="index.xhtml">[ Index ]</a>'
GROUP_XHTML_WORD_DEFINITION_TEMPLATE = """\t<div class="groupEntry">
<h2 class="groupHeadword">{headword}</h2>
<p class="groupDefinition">{definition}</p>
</div>"""
OPF_TEMPLATE = """<?xml version="1.0" encoding="utf-8" ?>
<package xmlns="http://www.idpf.org/2007/opf" version="2.0"
unique-identifier="uid">
<metadata xmlns:opf="http://www.idpf.org/2007/opf"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<dc:identifier id="uid" opf:scheme="uuid">{identifier}</dc:identifier>
<dc:language>{sourceLang}</dc:language>
<dc:title>{title}</dc:title>
<dc:creator opf:role="aut">{creator}</dc:creator>
<dc:rights>{copyright}</dc:rights>
<dc:date opf:event="creation">{creationDate}</dc:date>
{cover}
</metadata>
<manifest>
{manifest}
</manifest>
<spine toc="toc.ncx">
{spine}
</spine>
</package>"""
COVER_TEMPLATE = '<meta name="cover" content="{cover}" />'
def __init__(self, glos: GlossaryType) -> None:
import uuid
EbookWriter.__init__(
self,
glos,
)
glos.setInfo("uuid", str(uuid.uuid4()).replace("-", ""))
@classmethod
def cls_get_prefix(
cls: type[EbookWriter],
options: "dict[str, Any]",
word: str,
) -> str:
if not word:
return ""
length = options.get("group_by_prefix_length", cls._group_by_prefix_length)
prefix = word[:length].lower()
if prefix[0] < "a":
return "SPECIAL"
return prefix
def get_prefix(self, word: str) -> str:
if not word:
return ""
length = self._group_by_prefix_length
prefix = word[:length].lower()
if prefix[0] < "a":
return "SPECIAL"
return prefix
def write_ncx(self, group_labels: list[str]) -> None:
"""
write_ncx
only for epub.
"""
ncx_items = []
index = 1
if self._include_index_page:
ncx_items.append(
self.NCX_NAVPOINT_TEMPLATE.format(
index=index,
text="Index",
src="index.xhtml",
),
)
index += 1
for group_label in group_labels:
ncx_items.append(
self.NCX_NAVPOINT_TEMPLATE.format(
index=index,
text=group_label,
src=self.get_group_xhtml_file_name_from_index(index),
),
)
index += 1
ncx_items_unicode = "\n".join(ncx_items)
ncx_contents = self.NCX_TEMPLATE.format(
identifier=self._glos.getInfo("uuid"),
title=self._glos.getInfo("name"),
ncx_items=ncx_items_unicode,
).encode("utf-8")
self.add_file_manifest(
"OEBPS/toc.ncx",
"toc.ncx",
ncx_contents,
"application/x-dtbncx+xml",
)
# inherits write from EbookWriter
| 7,500 | Python | .py | 270 | 25.474074 | 81 | 0.703123 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,852 | appledict_properties.py | ilius_pyglossary/pyglossary/plugins/appledict_bin/appledict_properties.py |
# -*- coding: utf-8 -*-
# Copyright © 2023 soshial <soshial@gmail.com> (soshial)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
from dataclasses import dataclass
__all__ = ["AppleDictProperties", "from_metadata"]
@dataclass(slots=True, frozen=True)
class AppleDictProperties:
# in plist file: IDXDictionaryVersion
# values := (1 | 2 | 3)
format_version: int
# in plist file: HeapDataCompressionType values := (absent | 1 | 2)
body_compression_type: int
# in plist file: for field with "IDXDataFieldName" equal "DCSExternalBodyID"
# "IDXDataSize" value = 4 or 8
body_has_sections: bool
# in plist file for key_text_metadata:
# 'TrieAuxiliaryDataOptions' -> 'HeapDataCompressionType'
key_text_compression_type: int
# in plist file: IDXIndexDataFields / "IDXFixedDataFields"
# Example: ["DCSPrivateFlag"]
key_text_fixed_fields: list[str]
# in plist file: IDXIndexDataFields / "IDXVariableDataFields"
# Example: ["DCSKeyword", "DCSHeadword", "DCSEntryTitle",
# "DCSAnchor", "DCSYomiWord"]
key_text_variable_fields: list[str]
# DCSDictionaryCSS, generally "DefaultStyle.css"
css_name: "str | None"
def from_metadata(metadata: dict) -> AppleDictProperties:
format_version: int = metadata.get("IDXDictionaryVersion", -1)
dictionaryIndexes: "list[dict] | None" = metadata.get("IDXDictionaryIndexes")
if dictionaryIndexes:
key_text_metadata = dictionaryIndexes[0]
body_metadata = dictionaryIndexes[2]
else:
key_text_metadata = {}
body_metadata = {}
key_text_data_fields = key_text_metadata.get("IDXIndexDataFields", {})
key_text_variable_fields = [
field_data["IDXDataFieldName"]
for field_data in key_text_data_fields.get("IDXVariableDataFields", [])
]
key_text_fixed_field = [
fixed_field["IDXDataFieldName"]
for fixed_field in key_text_data_fields.get("IDXFixedDataFields", [])
]
external_data_fields = key_text_data_fields.get("IDXExternalDataFields")
body_compression_type = body_metadata.get("HeapDataCompressionType", 0)
body_has_sections = (
body_compression_type == 2 and external_data_fields[0].get("IDXDataSize") == 8
)
if (
"TrieAuxiliaryDataOptions" in key_text_metadata
and "HeapDataCompressionType" in key_text_metadata["TrieAuxiliaryDataOptions"]
):
key_text_compression_type = key_text_metadata["TrieAuxiliaryDataOptions"][
"HeapDataCompressionType"
]
else:
key_text_compression_type = 0
css_name = metadata.get("DCSDictionaryCSS")
return AppleDictProperties(
format_version=format_version,
body_compression_type=body_compression_type,
body_has_sections=body_has_sections,
key_text_compression_type=key_text_compression_type,
key_text_fixed_fields=key_text_fixed_field,
key_text_variable_fields=key_text_variable_fields,
css_name=css_name,
)
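# Minimal usage sketch (hypothetical metadata, not taken from a real
# Info.plist): from_metadata({}) falls back to
# AppleDictProperties(format_version=-1, body_compression_type=0,
# body_has_sections=False, key_text_compression_type=0,
# key_text_fixed_fields=[], key_text_variable_fields=[], css_name=None);
# a real plist fills these in from IDXDictionaryIndexes as described above.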
| 3,328 | Python | .py | 81 | 38.839506 | 80 | 0.767492 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,853 | __init__.py | ilius_pyglossary/pyglossary/plugins/appledict_bin/__init__.py |
# -*- coding: utf-8 -*-
# Copyright © 2019 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import os
import re
from collections.abc import Iterator
from datetime import datetime
from io import BytesIO
from operator import attrgetter
from os.path import isdir, isfile, join, split, splitext
from struct import unpack
from typing import (
TYPE_CHECKING,
Any,
cast,
)
from lxml import etree
from .appledict_file_tools import (
APPLEDICT_FILE_OFFSET,
guessFileOffsetLimit,
read_2_bytes_here,
read_x_bytes_as_word,
readInt,
)
from .appledict_properties import from_metadata
from .article_address import ArticleAddress
from .key_data import KeyData, RawKeyData
if TYPE_CHECKING:
import io
from lxml.html import ( # type: ignore
HtmlComment,
HtmlElement,
HtmlEntity,
HtmlProcessingInstruction,
)
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element
from .appledict_properties import AppleDictProperties
from zlib import decompress
from pyglossary import core
from pyglossary.apple_utils import substituteAppleCSS
from pyglossary.core import exc_note, log, pip
from pyglossary.io_utils import nullBinaryIO
from pyglossary.option import BoolOption, Option
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "appledict_bin"
format = "AppleDictBin"
description = "AppleDict Binary"
extensions = (".dictionary", ".data")
extensionCreate = ""
singleFile = True
kind = "binary"
wiki = ""
website = (
"https://support.apple.com/en-gu/guide/dictionary/welcome/mac",
"Dictionary User Guide for Mac",
)
optionsProp: "dict[str, Option]" = {
"html": BoolOption(comment="Entries are HTML"),
"html_full": BoolOption(
comment="Turn every entry's definition into an HTML document",
),
}
class Reader:
depends = {
"lxml": "lxml",
"biplist": "biplist",
}
_html: bool = True
_html_full: bool = True
resNoExt = {
".data",
".index",
".plist",
".xsl",
".html",
".strings",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos: GlossaryType = glos
self._dictDirPath = ""
self._contentsPath = ""
self._file: "io.BufferedIOBase" = nullBinaryIO
self._encoding = "utf-8"
self._defiFormat = "m"
self._re_xmlns = re.compile(' xmlns:d="[^"<>]+"')
self._titleById: "dict[str, str]" = {}
self._wordCount = 0
self._keyTextData: "dict[ArticleAddress, list[RawKeyData]]" = {}
self._cssName = ""
@staticmethod
def tostring(
elem: "Element | HtmlComment | HtmlElement | "
"HtmlEntity | HtmlProcessingInstruction",
) -> str:
from lxml.html import tostring
return tostring(
cast("HtmlElement", elem),
encoding="utf-8",
method="html",
).decode("utf-8")
def fixLink(self, a: Element) -> Element:
href = a.attrib.get("href", "")
if href.startswith("x-dictionary:d:"):
word = href[len("x-dictionary:d:") :]
a.attrib["href"] = href = f"bword://{word}"
elif href.startswith("x-dictionary:r:"):
# https://github.com/ilius/pyglossary/issues/343
id_i = len("x-dictionary:r:")
id_j = href.find(":", id_i)
_id = href[id_i:id_j]
title = self._titleById.get(_id)
if title:
a.attrib["href"] = href = f"bword://{title}"
else:
title = a.attrib.get("title")
if title:
a.attrib["href"] = href = f"bword://{title}"
elif href.startswith(("http://", "https://")):
pass
else:
a.attrib["href"] = f"bword://{href}"
return a
# TODO: PLR0912 Too many branches (17 > 12)
def open(self, filename: str) -> Iterator[tuple[int, int]]: # noqa: PLR0912
from os.path import dirname
try:
from lxml import etree # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install lxml` to install")
raise
try:
import biplist # type: ignore # noqa: F401
except ModuleNotFoundError as e:
exc_note(e, f"Run `{pip} install biplist` to install")
raise
self._defiFormat = "h" if self._html else "m"
dictDirPath: str
contentsPath: str
infoPlistPath: str
bodyDataPath: str
keyTextDataPath: str
if isdir(filename):
if split(filename)[-1] == "Contents":
contentsPath = filename
dictDirPath = dirname(filename)
elif isdir(join(filename, "Contents")):
contentsPath = join(filename, "Contents")
dictDirPath = filename
else:
raise OSError(f"invalid directory {filename}")
elif split(filename)[-1] == "Body.data":
# Maybe we should remove this support in a future release
parentPath = dirname(filename)
parentName = split(parentPath)[-1]
if parentName == "Contents":
contentsPath = parentPath
elif parentName == "Resources":
contentsPath = dirname(parentPath)
else:
raise OSError(f"invalid file path {filename}")
dictDirPath = dirname(contentsPath)
else:
raise OSError(f"invalid file path {filename}")
if not isdir(contentsPath):
raise OSError(
f"{contentsPath} is not a folder, "
"Please provide 'Contents/' folder of the dictionary",
)
infoPlistPath = join(contentsPath, "Info.plist")
if isfile(join(contentsPath, "Body.data")):
bodyDataPath = join(contentsPath, "Body.data")
keyTextDataPath = join(contentsPath, "KeyText.data")
elif isfile(join(contentsPath, "Resources/Body.data")):
bodyDataPath = join(contentsPath, "Resources/Body.data")
keyTextDataPath = join(contentsPath, "Resources/KeyText.data")
else:
raise OSError(
"could not find Body.data file, "
"Please provide 'Contents/' folder of the dictionary",
)
metadata = self.parseMetadata(infoPlistPath)
self.setMetadata(metadata)
yield from self.setKeyTextData(
keyTextDataPath,
self._properties,
)
self._dictDirPath = dictDirPath
self._contentsPath = contentsPath
self._file = open(bodyDataPath, "rb")
_, self._limit = guessFileOffsetLimit(self._file)
t0 = datetime.now()
self.readEntryIds()
dt = datetime.now() - t0
log.info(
f"Reading entry IDs took {int(dt.total_seconds() * 1000)} ms, "
f"number of entries: {self._wordCount}",
)
@staticmethod
def parseMetadata(infoPlistPath: str) -> dict[str, Any]:
import biplist
if not isfile(infoPlistPath):
raise OSError(
"Could not find 'Info.plist' file, "
"Please provide 'Contents/' folder of the dictionary",
)
metadata: "dict[str, Any]"
try:
metadata = biplist.readPlist(infoPlistPath)
except (biplist.InvalidPlistException, biplist.NotBinaryPlistException):
try:
import plistlib
with open(infoPlistPath, "rb") as plist_file:
metadata = plistlib.loads(plist_file.read())
except Exception as e:
raise OSError(
"'Info.plist' file is malformed, "
f"Please provide 'Contents/' with a correct 'Info.plist'. {e}",
) from e
return metadata
def setMetadata(self, metadata: "dict[str, Any]") -> None:
name = metadata.get("CFBundleDisplayName")
if not name:
name = metadata.get("CFBundleIdentifier")
if name:
self._glos.setInfo("name", name)
identifier = metadata.get("CFBundleIdentifier")
if identifier and identifier != name:
self._glos.setInfo("CFBundleIdentifier", identifier)
_copyright = metadata.get("DCSDictionaryCopyright")
if _copyright:
self._glos.setInfo("copyright", _copyright)
author = metadata.get("DCSDictionaryManufacturerName")
if author:
self._glos.setInfo("author", author)
edition = metadata.get("CFBundleInfoDictionaryVersion")
if edition:
self._glos.setInfo("edition", edition)
if "DCSDictionaryLanguages" in metadata:
self.setLangs(metadata)
self._properties = from_metadata(metadata)
self._cssName = self._properties.css_name or "DefaultStyle.css"
def setLangs(self, metadata: "dict[str, Any]") -> None:
import locale
langsList = metadata.get("DCSDictionaryLanguages")
if not langsList:
return
langs = langsList[0]
sourceLocale = langs["DCSDictionaryDescriptionLanguage"]
self._glos.sourceLangName = locale.normalize(sourceLocale).split("_")[0]
targetLocale = langs["DCSDictionaryIndexLanguage"]
self._glos.targetLangName = locale.normalize(targetLocale).split("_")[0]
def __len__(self) -> int:
return self._wordCount
def close(self) -> None:
self._file.close()
self._file = nullBinaryIO
def _getDefi(
self,
entryElem: Element,
) -> str:
if not self._html:
# FIXME: this produces duplicate text for Idioms.dictionary, see #301
return "".join(
self.tostring(child) for child in entryElem.iterdescendants()
)
entryElem.tag = "div"
for attr in list(entryElem.attrib):
# if attr == "id" or attr.endswith("title"):
del entryElem.attrib[attr]
for a_link in entryElem.xpath("//a"):
self.fixLink(a_link)
defi = self.tostring(entryElem)
defi = self._re_xmlns.sub("", defi)
if self._html_full:
defi = (
"<!DOCTYPE html><html><head>"
'<link rel="stylesheet" href="style.css">'
f"</head><body>{defi}</body></html>"
)
return defi
@staticmethod
def getChunkLenOffset(
pos: int,
buffer: bytes,
) -> tuple[int, int]:
"""
@return chunk byte length and offset.
offset is usually 4 bytes integer, that contains chunk/entry byte length
"""
offset = buffer[pos : pos + 12].find(b"<d:entry")
if offset == -1:
log.info(f"{buffer[pos:]=}")
raise OSError("Could not find entry tag <d:entry>")
if offset == 0:
# when no such info (offset equals 0) provided,
# we take all bytes till the closing tag or till section end
endI = buffer[pos:].find(b"</d:entry>\n")
chunkLen = len(buffer) - pos if endI == -1 else endI + 11
else:
bs = buffer[pos : pos + offset]
if offset < 4:
bs = b"\x00" * (4 - offset) + bs
try:
(chunkLen,) = unpack("i", bs)
except Exception as e:
log.error(f"{buffer[pos:pos + 100]!r}")
raise e from None
return chunkLen, offset
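# Worked example (illustrative): if buffer[pos:] starts with
# b"\x10\x00\x00\x00<d:entry ...", the tag is found at offset 4, the four
# preceding bytes unpack to chunkLen 16 (native, little-endian on typical
# machines), and the caller then reads 16 bytes of entry XML after the prefix.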
def createEntry(
self,
entryBytes: bytes,
articleAddress: ArticleAddress,
) -> EntryType | None:
# 1. create and validate XML of the entry's body
entryRoot = self.convertEntryBytesToXml(entryBytes)
if entryRoot is None:
return None
namespaces: "dict[str, str]" = {
key: value for key, value in entryRoot.nsmap.items() if key and value
}
entryElems = entryRoot.xpath("/d:entry", namespaces=namespaces)
if not entryElems:
return None
word = entryElems[0].xpath("./@d:title", namespaces=namespaces)[0]
# 2. add alts
keyTextFieldOrder = self._properties.key_text_variable_fields
words = [word]
keyDataList: list[KeyData] = [
KeyData.fromRaw(rawKeyData, keyTextFieldOrder)
for rawKeyData in self._keyTextData.get(articleAddress, [])
]
if keyDataList:
keyDataList.sort(
key=attrgetter("priority"),
reverse=True,
)
words += [keyData.keyword for keyData in keyDataList]
defi = self._getDefi(entryElems[0])
return self._glos.newEntry(
word=words,
defi=defi,
defiFormat=self._defiFormat,
byteProgress=(self._absPos, self._limit),
)
def convertEntryBytesToXml(
self,
entryBytes: bytes,
) -> Element | None:
if not entryBytes.strip():
return None
try:
entryRoot = etree.fromstring(entryBytes)
except etree.XMLSyntaxError as e:
log.error(
f"{entryBytes=}",
)
raise e from None
if self._limit <= 0:
raise ValueError(f"self._limit = {self._limit}")
return entryRoot
def readEntryIds(self) -> None:
titleById = {}
for entryBytesTmp, _ in self.yieldEntryBytes(
self._file,
self._properties,
):
entryBytes = entryBytesTmp.strip()
if not entryBytes:
continue
id_i = entryBytes.find(b'id="')
if id_i < 0:
log.error(f"id not found: {entryBytes!r}")
continue
id_j = entryBytes.find(b'"', id_i + 4)
if id_j < 0:
log.error(f"id closing not found: {entryBytes.decode(self._encoding)}")
continue
_id = entryBytes[id_i + 4 : id_j].decode(self._encoding)
title_i = entryBytes.find(b'd:title="')
if title_i < 0:
log.error(f"title not found: {entryBytes.decode(self._encoding)}")
continue
title_j = entryBytes.find(b'"', title_i + 9)
if title_j < 0:
log.error(
f"title closing not found: {entryBytes.decode(self._encoding)}",
)
continue
titleById[_id] = entryBytes[title_i + 9 : title_j].decode(self._encoding)
self._titleById = titleById
self._wordCount = len(titleById)
def setKeyTextData(
self,
morphoFilePath: str,
properties: AppleDictProperties,
) -> Iterator[tuple[int, int]]:
"""
Prepare `KeyText.data` file for extracting morphological data.
Returns an iterator/generator for the progress
Sets self._keyTextData when done
"""
with open(morphoFilePath, "rb") as keyTextFile:
fileDataOffset, fileLimit = guessFileOffsetLimit(keyTextFile)
buff = BytesIO()
if properties.key_text_compression_type > 0:
keyTextFile.seek(fileDataOffset + APPLEDICT_FILE_OFFSET)
sectionLength = readInt(keyTextFile)
sectionOffset = keyTextFile.tell()
fileLimitDecompressed = 0
while keyTextFile.tell() < fileLimit + APPLEDICT_FILE_OFFSET:
compressedSectionByteLen = readInt(keyTextFile)
decompressedSectionByteLen = readInt(keyTextFile)
if compressedSectionByteLen == decompressedSectionByteLen == 0:
break
chunk_section_compressed = keyTextFile.read(
compressedSectionByteLen - 4,
)
chunksection_bytes = decompress(chunk_section_compressed)
buff.write(chunksection_bytes)
fileLimitDecompressed += decompressedSectionByteLen
sectionOffset += max(sectionLength, compressedSectionByteLen + 4)
keyTextFile.seek(sectionOffset)
bufferOffset = 0
bufferLimit = fileLimitDecompressed
else:
keyTextFile.seek(APPLEDICT_FILE_OFFSET)
buff.write(keyTextFile.read())
bufferOffset = fileDataOffset
bufferLimit = fileLimit
yield from self.readKeyTextData(
buff=buff,
bufferOffset=bufferOffset,
bufferLimit=bufferLimit,
properties=properties,
)
# TODO: PLR0912 Too many branches (16 > 12)
def readKeyTextData( # noqa: PLR0912
self,
buff: "io.BufferedIOBase",
bufferOffset: int,
bufferLimit: int,
properties: AppleDictProperties,
) -> Iterator[tuple[int, int]]:
"""
Returns an iterator/generator for the progress
Sets self._keyTextData when done.
"""
buff.seek(bufferOffset)
keyTextData: "dict[ArticleAddress, list[RawKeyData]]" = {}
while bufferOffset < bufferLimit:
yield (bufferOffset, bufferLimit)
buff.seek(bufferOffset)
next_section_jump = readInt(buff)
if properties.key_text_compression_type == 0:
big_len = readInt(buff) # noqa: F841
# number of lexemes
wordFormCount = read_2_bytes_here(buff) # 0x01
next_lexeme_offset: int = 0
for _ in range(wordFormCount):
_ = read_2_bytes_here(buff) # 0x00
# TODO might be 1 or 2 or more zeros
if next_lexeme_offset != 0:
buff.seek(next_lexeme_offset)
small_len = 0
while small_len == 0:
small_len = read_2_bytes_here(buff) # 0x2c
curr_offset = buff.tell()
next_lexeme_offset = curr_offset + small_len
# the resulting number must match with Contents/Body.data
# address of the entry
articleAddress: ArticleAddress
if properties.body_has_sections:
chunkOffset = readInt(buff)
sectionOffset = readInt(buff)
articleAddress = ArticleAddress(
sectionOffset=sectionOffset,
chunkOffset=chunkOffset,
)
else:
chunkOffset = 0x0
sectionOffset = readInt(buff)
articleAddress = ArticleAddress(
sectionOffset=sectionOffset,
chunkOffset=chunkOffset,
)
if len(properties.key_text_fixed_fields) == 0:
priority = 0
parentalControl = 0
elif len(properties.key_text_fixed_fields) == 1:
priorityAndParentalControl = read_2_bytes_here(buff) # 0x13
# "DCSDictionaryLanguages" array inside plist file has a list of
# dictionaries contained inside this file.
# The DCSPrivateFlag of each article provides not only priority
# and parental control, but also a flag of translation direction:
# values 0x00-0x1f are reserved for the first language from the
# DCSDictionaryLanguages array, values 0x20-0x3f for the second
# language, and so on.
if priorityAndParentalControl >= 0x40:
log.error(
"WRONG priority or parental control:"
f"{priorityAndParentalControl} (section: {bufferOffset:#x})"
", skipping KeyText.data file",
)
return
if priorityAndParentalControl >= 0x20:
priorityAndParentalControl -= 0x20
# d:parental-control="1"
parentalControl = priorityAndParentalControl % 2
# d:priority=".." between 0x00..0x12, priority = [0..9]
priority = (priorityAndParentalControl - parentalControl) // 2
else:
log.error(
f"Unknown private field: {properties.key_text_fixed_fields}",
)
return
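# Example decoding of the DCSPrivateFlag value (illustrative): 0x13 (19) is
# below 0x20, so it belongs to the first language direction;
# parentalControl = 19 % 2 = 1 and priority = (19 - 1) // 2 = 9.
# A value of 0x26 is first reduced by 0x20 (second language direction) to 6,
# which decodes to parentalControl = 0 and priority = 3.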
keyTextFields: list[str] = []
while buff.tell() < next_lexeme_offset:
word_form_len = read_2_bytes_here(buff)
if word_form_len == 0:
keyTextFields.append("")
continue
word_form = read_x_bytes_as_word(buff, word_form_len)
keyTextFields.append(word_form)
entryKeyTextData: RawKeyData = (
priority,
parentalControl,
tuple(keyTextFields),
)
if articleAddress in keyTextData:
keyTextData[articleAddress].append(entryKeyTextData)
else:
keyTextData[articleAddress] = [entryKeyTextData]
bufferOffset += next_section_jump + 4
self._keyTextData = keyTextData
def readResFile(self, fname: str, fpath: str, ext: str) -> EntryType:
with open(fpath, "rb") as _file:
data = _file.read()
if ext == ".css":
log.debug(f"substituting apple css: {fname}: {fpath}")
data = substituteAppleCSS(data)
return self._glos.newDataEntry(fname, data)
def fixResFilename(self, fname: str, relPath: str) -> str:
if fname == self._cssName:
fname = "style.css"
if relPath:
fname = relPath + "/" + fname
if os.sep == "\\":
fname = fname.replace("\\", "/")
return fname
def readResDir(
self,
dirPath: str,
recurse: bool = False,
relPath: str = "",
) -> Iterator[EntryType]:
if not isdir(dirPath):
return
resNoExt = self.resNoExt
for fname in os.listdir(dirPath):
if fname == "Resources":
continue
_, ext = splitext(fname)
if ext in resNoExt:
continue
fpath = join(dirPath, fname)
if isdir(fpath):
if recurse:
yield from self.readResDir(
fpath,
recurse=True,
relPath=join(relPath, fname),
)
continue
if not isfile(fpath):
continue
fname2 = self.fixResFilename(fname, relPath)
core.trace(log, f"Using resource {fpath!r} as {fname2!r}")
yield self.readResFile(fname2, fpath, ext)
def __iter__(self) -> Iterator[EntryType]:
yield from self.readResDir(
self._contentsPath,
recurse=True,
)
yield from self.readResDir(
join(self._contentsPath, "Resources"),
recurse=True,
)
for entryBytes, articleAddress in self.yieldEntryBytes(
self._file,
self._properties,
):
entry = self.createEntry(entryBytes, articleAddress)
if entry is not None:
yield entry
def yieldEntryBytes(
self,
body_file: "io.BufferedIOBase",
properties: AppleDictProperties,
) -> Iterator[tuple[bytes, ArticleAddress]]:
fileDataOffset, fileLimit = guessFileOffsetLimit(body_file)
sectionOffset = fileDataOffset
while sectionOffset < fileLimit:
body_file.seek(sectionOffset + APPLEDICT_FILE_OFFSET)
self._absPos = body_file.tell()
# at the start of each section, the byte lengths of the section are encoded
next_section_jump = readInt(body_file)
data_byte_len = readInt(body_file)
if properties.body_compression_type > 0:
decompressed_byte_len = readInt(body_file) # noqa: F841
decompressed_bytes = body_file.read(data_byte_len - 4)
buffer = decompress(decompressed_bytes)
else:
buffer = body_file.read(data_byte_len)
pos = 0
while pos < len(buffer):
chunkLen, offset = self.getChunkLenOffset(pos, buffer)
articleAddress = ArticleAddress(sectionOffset, pos)
pos += offset
entryBytes = buffer[pos : pos + chunkLen]
pos += chunkLen
yield entryBytes, articleAddress
sectionOffset += next_section_jump + 4
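# A rough sketch of the Body.data section layout implied by yieldEntryBytes
# above (field names are descriptive, not official Apple terminology):
#
#   [int32 next_section_jump][int32 data_byte_len]
#   if body_compression_type > 0:
#       [int32 decompressed_byte_len][data_byte_len - 4 bytes of compressed data]
#   else:
#       [data_byte_len bytes of raw chunk data]
#
# Each chunk inside the (decompressed) buffer is in turn prefixed by its own
# length, which getChunkLenOffset parses before the entry bytes are yielded.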
# File: ilius_pyglossary/pyglossary/plugins/appledict_bin/appledict_file_tools.py
# -*- coding: utf-8 -*-
# Copyright © 2023 soshial <soshial@gmail.com> (soshial)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
from struct import unpack
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import io
__all__ = [
"APPLEDICT_FILE_OFFSET",
"guessFileOffsetLimit",
"readInt",
"read_2_bytes_here",
"read_x_bytes_as_word",
]
APPLEDICT_FILE_OFFSET = 0x40
# addressing of AppleDict binary files always ignores first 0x40 bytes
def readIntPair(buffer: "io.BufferedIOBase") -> tuple[int, int]:
# to satisfy mypy, put them in vars with declared type
a: int
b: int
a, b = unpack("ii", buffer.read(8))
return a, b
def readInt(buffer: "io.BufferedIOBase") -> int:
return unpack("i", buffer.read(4))[0]
def read_x_bytes_as_word(buffer: "io.BufferedIOBase", x: int) -> str:
return buffer.read(x).decode("UTF-16LE")
def read_2_bytes_here(buffer: "io.BufferedIOBase") -> int:
lower_byte = buffer.read(1)
higher_byte = buffer.read(1)
return ord(higher_byte) * 0x100 + ord(lower_byte)
def guessFileOffsetLimit(file: "io.BufferedIOBase") -> tuple[int, int]:
"""Returns address offset to start parsing from and EOF address."""
file.seek(APPLEDICT_FILE_OFFSET)
limit = readInt(file)
intPair = readIntPair(file)
if intPair == (0, -1): # 0000 0000 FFFF FFFF
return 0x20, limit
return 0x4, limit
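# A minimal usage sketch (not part of the original module) exercising the
# low-level readers above on an in-memory buffer; the byte values are made up
# for illustration, and the expected results assume a little-endian host,
# since readInt() uses native byte order:
if __name__ == "__main__":
	from io import BytesIO
	buf = BytesIO(b"\x2c\x01\x00\x00" b"\x34\x00" b"A\x00B\x00")
	print(readInt(buf))  # 300 (0x012c)
	print(read_2_bytes_here(buf))  # 52 (0x34)
	print(read_x_bytes_as_word(buf, 4))  # "AB" (decoded as UTF-16LE)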
# File: ilius_pyglossary/pyglossary/plugins/appledict_bin/key_data.py
# -*- coding: utf-8 -*-
# Copyright © 2023 soshial <soshial@gmail.com> (soshial)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import typing
__all__ = ["KeyData", "RawKeyData"]
RawKeyData: "typing.TypeAlias" = "tuple[int, int, typing.Sequence[str]]"
"""tuple(priority, parentalControl, keyTextFields)"""
"""
KeyText.data contains:
1. morphological data (opens article "make" when user enters "making")
2. data that encodes that searching "2 per cent", "2 percent",
or "2%" returns the same article
EXAMPLE: <d:index d:value="made" d:title="made (make)"/>
If the entry for "make" contains these <d:index> definitions,
the entry can be searched not only by "make" but also by "makes" or "made".
On the search result list, title value texts like "made" are displayed.
EXAMPLE: <d:index d:value="make it" d:title="make it" d:parental-control="1"
d:anchor="xpointer(//*[@id='make_it'])"/>
EXAMPLE: <d:index d:value="工夫する" d:title="工夫する"
d:yomi="くふうする" d:anchor="xpointer(//*[@id='kufuu-suru'])" />
EXAMPLE: <d:index d:value="'s finest" d:title="—'s finest"
d:DCSEntryTitle="fine" d:anchor="xpointer(//*[@id='m_en_gbus0362750.070'])"/>
if the user enters "'s finest", the search list shows "—'s finest",
and the article titled "fine" is opened, pointing to element id = 'm_en_gbus0362750.070'
"""
# TODO: switch to dataclass
class KeyData:
"""
Dictionary entries are opened by entering different search texts.
This class contains the texts by which an entry is searchable, and other properties.
"""
# keyword_data_id_xml = {
# "DCSKeyword": "d:value",
# # Search key -- if entered in search, this key will provide this definition.
# "DCSHeadword": "d:title",
# # Headword text that is displayed on the search result list.
# # When the value is the same as d:value, it can be omitted.
# # In that case, the value of the d:value is used also for the d:title.
# "DCSAnchor": "d:anchor",
# # Used to highlight a specific part in an entry.
# # For example, it is used to highlight an idiomatic phrase explanation
# # in an entry for a word.
# "DCSYomiWord": "d:yomi",
# # Used only in making Japanese dictionaries.
# "DCSSortKey": "d:DCSSortKey",
# # This value shows sorting (probably for non-english languages)
# "DCSEntryTitle": "d:DCSEntryTitle",
# # Headword displayed as article title
# }
__slots__ = [
"anchor",
"entryTitle",
"headword",
"keyword",
"parentalControl",
"priority",
]
def __init__( # noqa: PLR0913
self,
priority: int,
parentalControl: int,
keyword: str,
headword: str,
entryTitle: str,
anchor: str,
) -> None:
self.priority = priority
self.parentalControl = parentalControl
self.keyword = keyword
self.headword = headword
self.entryTitle = entryTitle
self.anchor = anchor
def toDict(self) -> dict[str, typing.Any]:
return {
"priority": self.priority,
"parentalControl": self.parentalControl,
"keyword": self.keyword,
"headword": self.headword,
"entryTitle": self.entryTitle,
"anchor": self.anchor,
}
@staticmethod
def fromRaw(rawKeyData: RawKeyData, keyTextFieldOrder: list[str]) -> KeyData:
priority, parentalControl, keyTextFields = rawKeyData
keyword = ""
headword = ""
entryTitle = ""
anchor = ""
for i, key_value in enumerate(keyTextFields):
key_type = keyTextFieldOrder[i]
if key_type == "DCSKeyword":
keyword = key_value
elif key_type == "DCSHeadword":
headword = key_value
elif key_type == "DCSEntryTitle":
entryTitle = key_value
elif key_type == "DCSAnchor":
anchor = key_value
return KeyData(
priority,
parentalControl,
keyword,
headword,
entryTitle,
anchor,
)
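# A minimal sketch (hypothetical values) of how a RawKeyData tuple read from
# KeyText.data is turned into a KeyData object, assuming the key text field
# order declared by the dictionary is ["DCSKeyword", "DCSHeadword"]:
if __name__ == "__main__":
	raw = (5, 0, ("made", "made (make)"))
	kd = KeyData.fromRaw(raw, ["DCSKeyword", "DCSHeadword"])
	print(kd.toDict())
	# {'priority': 5, 'parentalControl': 0, 'keyword': 'made',
	#  'headword': 'made (make)', 'entryTitle': '', 'anchor': ''}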
# File: ilius_pyglossary/pyglossary/plugins/appledict_bin/article_address.py
# -*- coding: utf-8 -*-
# Copyright © 2023 soshial <soshial@gmail.com> (soshial)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
from typing import NamedTuple
__all__ = ["ArticleAddress"]
class ArticleAddress(NamedTuple):
sectionOffset: int
chunkOffset: int
def __str__(self) -> str:
return f"Addr[{hex(self.sectionOffset)}, {hex(self.chunkOffset)}]"
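# A small illustration (made-up offsets): ArticleAddress(0x5c00, 0x2c)
# prints as "Addr[0x5c00, 0x2c]"; being a NamedTuple it is hashable, so it
# serves as the key of the keyTextData dictionary in the reader.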
# File: ilius_pyglossary/pyglossary/plugins/dsl/_types.py
from __future__ import annotations
import typing
from collections.abc import Callable
from typing import TYPE_CHECKING
__all__ = [
"ErrorType",
"LexType",
"TitleLexType",
"TitleTransformerType",
"TransformerType",
]
class TransformerType(typing.Protocol):
start: int
pos: int
input: str
output: str
currentKey: str
attrs: "dict[str, str | None]"
attrName: str
audio: bool
resFileSet: "set[str]"
exampleColor: str
def __init__(self) -> None:
pass
def end(self) -> bool:
pass
def move(self, chars: int) -> None:
pass
def next(self) -> str:
pass
def follows(self, st: str) -> bool:
pass
def skipAny(self, chars: str) -> None:
pass
def addText(self, st: str) -> None:
pass
def resetBuf(self) -> str:
pass
class TitleTransformerType(TransformerType, typing.Protocol):
title: str
outputAlt: str
def addText2(self, st: str) -> None:
pass
if TYPE_CHECKING:
ErrorType = str | None
# it is a state function (state as in state machine)
LexType = Callable[[TransformerType], tuple["LexType", ErrorType]] | None
TitleLexType = (
Callable[
[TitleTransformerType],
tuple["TitleLexType", ErrorType],
]
| None
)
# File: ilius_pyglossary/pyglossary/plugins/dsl/test.py
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(dirname(__file__))))) # noqa: E402
from pyglossary.plugins.dsl.transform import Transformer
if __name__ == "__main__":
input = sys.argv[1]
tr = Transformer(
input,
currentKey="HEADWORD",
)
result, err = tr.transform()
if err:
print(f"Error: {err} in {input!r}")
elif result is None:
print("ERROR: result is None")
else:
print(result.output)
# File: ilius_pyglossary/pyglossary/plugins/dsl/transform.py
from __future__ import annotations
import re
from typing import TYPE_CHECKING, NamedTuple, cast
from xml.sax.saxutils import escape
from pyglossary.core import log
from .lex import lexRoot
if TYPE_CHECKING:
from ._types import ErrorType, LexType, TransformerType
__all__ = ["Transformer"]
re_comment_block = re.compile(r"\{\{([^}]*)\}\}")
class Result(NamedTuple):
output: str
resFileSet: set[str]
# (called a Lexer by Rob Pike in his "Lexical Scanning" talk)
class Transformer:
def __init__( # noqa: PLR0913
self,
input: str,
currentKey: str = "",
exampleColor: str = "steelblue",
audio: bool = True,
abbrev: str = "", # "" or "css"
abbrevDict: dict[str, str] | None = None,
) -> None:
self.input = input
self.start = 0
self.pos = 0
self.labelOpen = False
self.label = ""
self.output = ""
self.resFileSet: set[str] = set()
self.abbrev = abbrev
self.abbrevDict = abbrevDict
self.attrs: dict[str, str] = {}
self.attrName = ""
self.currentKey = currentKey
self.exampleColor = exampleColor
self.audio = audio
def end(self) -> bool:
return self.pos >= len(self.input)
def move(self, chars: int) -> None:
self.pos += chars
# self.absPos += chars
def next(self) -> str:
c = self.input[self.pos]
self.pos += 1
# self.absPos += 1
return c # noqa: RET504
def resetBuf(self) -> None:
self.start = self.pos
self.attrName = ""
self.attrs = {}
def follows(self, st: str) -> bool:
"""Check if current position follows the string `st`."""
pos = self.pos
for c in st:
if pos >= len(self.input):
return False
if self.input[pos] not in c:
return False
pos += 1
return True
def skipAny(self, chars: str) -> None:
"""Skip any of the characters that are in `chars`."""
pos = self.pos
while True:
if pos >= len(self.input):
break
if self.input[pos] not in chars:
break
pos += 1
self.pos = pos
def addHtml(self, st: str) -> None:
if self.labelOpen:
self.label += st
return
self.output += st
def addText(self, st: str) -> None:
st = escape(st)
if self.labelOpen:
self.label += st
return
self.output += st
def closeLabel(self):
# print(f"Label: {self.label!r}")
desc = None
if self.abbrev:
desc = self.abbrevDict.get(self.label)
if desc:
self.output += (
'<i class="p"><font color="green" '
f'title="{escape(desc)}">{self.label}</font></i>'
)
else:
self.output += (
'<i class="p"><font color="green">' + self.label + "</font></i>"
)
self.label = ""
self.labelOpen = False
def closeTag(self, tag: str) -> None:
assert tag
if tag == "m":
self.addHtml("</p>")
elif tag == "b":
self.addHtml("</b>")
elif tag in {"u", "'"}:
self.addHtml("</u>")
elif tag == "i":
self.addHtml("</i>")
elif tag == "sup":
self.addHtml("</sup>")
elif tag == "sub":
self.addHtml("</sub>")
elif tag in {"c", "t"}:
self.addHtml("</font>")
elif tag == "p":
self.closeLabel()
elif tag == "*":
self.addHtml("</span>")
elif tag == "ex":
self.addHtml("</font></span>")
elif tag in {
"ref",
"url",
"s",
"trn",
"!trn",
"trs",
"!trs",
"lang",
"com",
}:
pass
else:
log.warning(f"unknown close tag {tag!r}")
self.resetBuf()
def transform(self) -> tuple[Result | None, ErrorType]:
# TODO: implement these 2 with lex functions
self.input = re_comment_block.sub("", self.input)
lex: LexType = lexRoot
tr = cast("TransformerType", self)
while lex is not None:
lex, err = lex(tr)
if err:
return None, err
if self.labelOpen:
self.closeLabel()
return Result(self.output, self.resFileSet), None
# File: ilius_pyglossary/pyglossary/plugins/dsl/title.py
from __future__ import annotations
from typing import TYPE_CHECKING, NamedTuple, cast
from xml.sax.saxutils import escape
from pyglossary.core import log
from .transform import Transformer
if TYPE_CHECKING:
from ._types import ErrorType
from ._types import TitleLexType as LexType
from ._types import TitleTransformerType as TransformerType
__all__ = ["TitleTransformer"]
def lexRoot(tr: TransformerType) -> tuple[LexType, ErrorType]:
# if tr.start < tr.pos:
# log.warning(f"incomplete buffer near pos {tr.pos}")
if tr.end():
return None, None
c = tr.next()
if tr.end():
tr.addText2(c)
return None, None
if c == "\\":
return lexBackslash, None
if c == "(":
# tr.resetBuf()
return lexParan, None
if c == "{":
return lexCurly, None
tr.addText2(c)
# tr.resetBuf()
return lexRoot, None
def lexBackslash(tr: TransformerType) -> tuple[LexType, ErrorType]:
c = tr.next()
tr.addText2(c)
# tr.resetBuf()
return lexRoot, None
def lexParan(tr: TransformerType) -> tuple[LexType, ErrorType]:
while True:
if tr.end():
log.warning(f"unclosed '(' near pos {tr.pos}")
return None, None
c = tr.next()
if c == "\\":
if tr.end():
log.warning(f"unclosed '(' near pos {tr.pos}")
return None, None
tr.addText("\\" + tr.next())
continue
if c == ")":
break
tr.addText(c)
return lexRoot, None
def lexCurly(tr: TransformerType) -> tuple[LexType, ErrorType]:
start = tr.pos
while True:
if tr.end():
log.warning(f"unclosed '{{' near pos {tr.pos}")
return None, None
c = tr.next()
if c == "\\":
if tr.end():
log.warning(f"unclosed '{{' near pos {tr.pos}")
return None, None
tr.next()
continue
if c == "}":
break
tr2 = Transformer(tr.input[start : tr.pos - 1])
res, err = tr2.transform()
if err or res is None:
return None, err
tr.title += res.output
return lexRoot, None
class TitleResult(NamedTuple):
output: str
outputAlt: str
class TitleTransformer:
def __init__(
self,
input: str,
) -> None:
self.input = input
# self.start = 0
self.pos = 0
self.output = ""
self.outputAlt = ""
self.title = ""
def end(self) -> bool:
return self.pos >= len(self.input)
def move(self, chars: int) -> None:
self.pos += chars
def next(self) -> str:
c = self.input[self.pos]
self.pos += 1
return c # noqa: RET504
# def resetBuf(self) -> str:
# self.start = self.pos
def addText(self, st: str) -> None:
self.output += escape(st)
self.title += escape(st)
def addText2(self, st: str) -> None:
esc = escape(st)
self.output += esc
self.outputAlt += esc
self.title += esc
def transform(self) -> tuple[TitleResult | None, ErrorType]:
lex: LexType = lexRoot
tr = cast("TransformerType", self)
while lex is not None:
lex, err = lex(tr)
if err:
return None, err
return TitleResult(
output=self.output,
outputAlt=self.outputAlt,
), None
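# A minimal sketch (not part of the original module) of how TitleTransformer
# handles a DSL headword with an optional part in parentheses: the full form
# ends up in `output` while `outputAlt` drops the optional part.
if __name__ == "__main__":
	res, err = TitleTransformer("colo(u)r").transform()
	if err is None and res is not None:
		print(res.output)  # "colour"
		print(res.outputAlt)  # "color"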
# File: ilius_pyglossary/pyglossary/plugins/dsl/__init__.py
# -*- coding: utf-8 -*-
# dsl/__init__.py
# Read ABBYY Lingvo DSL dictionary format
#
# Copyright © 2013-2020 Saeed Rasooli <saeed.gnu@gmail.com>
# Copyright © 2016 ivan tkachenko me@ratijas.tk
# Copyright © 2013 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import html
import html.entities
import re
from collections.abc import Iterator
from os.path import abspath, dirname, isfile, join, splitext
from typing import TYPE_CHECKING, cast
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import log
from pyglossary.io_utils import nullTextIO
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
StrOption,
)
from pyglossary.os_utils import indir
from pyglossary.text_reader import TextFilePosWrapper
from .title import TitleTransformer
from .transform import Transformer
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "dsl"
format = "ABBYYLingvoDSL"
description = "ABBYY Lingvo DSL (.dsl)"
extensions = (".dsl",)
extensionCreate = ".dsl"
singleFile = True
kind = "text"
wiki = "https://ru.wikipedia.org/wiki/ABBYY_Lingvo"
website = (
"https://www.lingvo.ru/",
"www.lingvo.ru",
)
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"audio": BoolOption(
comment="Enable audio objects",
),
"example_color": StrOption(
comment="Examples color",
),
"abbrev": StrOption(
customValue=False,
values=["", "hover"],
comment="Load and apply abbreviation file (`_abrv.dsl`)",
),
}
# ABBYY is a Russian company
# https://ru.wikipedia.org/wiki/ABBYY_Lingvo
# http://lingvo.helpmax.net/en/troubleshooting/dsl-compiler/compiling-a-dictionary/
# https://www.abbyy.com/news/abbyy-lingvo-80-dictionaries-to-suit-every-taste/
# {{{
# modified to work around codepoints that are not supported by `unichr`.
# http://effbot.org/zone/re-sub.htm#unescape-html
# January 15, 2003 | Fredrik Lundh
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
htmlEntityPattern = re.compile(r"&#?\w+;")
def unescape(text: str) -> str:
def fixup(m: "re.Match") -> str:
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
i = int(text[3:-1], 16) if text[:3] == "&#x" else int(text[2:-1])
except ValueError:
pass
else:
try:
return chr(i)
except ValueError:
# f"\\U{i:08x}", but no fb"..."
return (b"\\U%08x" % i).decode("unicode-escape")
else:
# named entity
try:
text = chr(html.entities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return htmlEntityPattern.sub(fixup, text)
# }}}
# precompiled regexs
re_wrapped_in_quotes = re.compile("^(\\'|\")(.*)(\\1)$")
def unwrap_quotes(s: str) -> str:
return re_wrapped_in_quotes.sub("\\2", s)
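# Quick illustrative checks for the two helpers above (values are made up):
# unescape("&#65;&amp;") -> "A&" (numeric and named character references)
# unwrap_quotes('"ABBYY Lingvo"') -> "ABBYY Lingvo"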
class Reader:
compressions = stdCompressions + ("dz",)
_encoding: str = ""
_audio: bool = True
_example_color: str = "steelblue"
_abbrev: str = "hover"
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._dirPath = ""
self._file: "io.TextIOBase" = nullTextIO
self._fileSize = 0
self._bufferLine = ""
self._resFileSet: set[str] = set()
self._includes: list[Reader] = []
self._abbrevDict: dict[str, str] = {}
def transform(
self,
text: str,
header: str,
) -> str:
tr = Transformer(
text,
currentKey=header,
audio=self._audio,
exampleColor=self._example_color,
abbrev=self._abbrev,
abbrevDict=self._abbrevDict if self._abbrev else None,
)
try:
result, err = tr.transform()
except Exception:
log.exception(f"{text = }")
return ""
if err:
log.error(f"error in transforming {text!r}: {err}")
return ""
if result is None:
log.error(f"error in transforming {text!r}: result is None")
return ""
resText = result.output.strip()
self._resFileSet.update(tr.resFileSet)
return resText
def close(self) -> None:
self._file.close()
self._file = nullTextIO
def __len__(self) -> int:
# FIXME
return 0
def open(
self,
filename: str,
) -> None:
self._filename = filename
self._dirPath = abspath(dirname(self._filename))
encoding = self._encoding
if not encoding:
encoding = self.detectEncoding()
cfile = cast(
"io.TextIOBase",
compressionOpen(
filename,
dz=True,
mode="rt",
encoding=encoding,
),
)
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
# self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("DSL Reader: file is not seekable")
self._file = TextFilePosWrapper(cfile, encoding)
# read header
for line in self._file:
line = line.rstrip().lstrip("\ufeff") # noqa: B005, PLW2901
# \ufeff -> https://github.com/ilius/pyglossary/issues/306
if not line:
continue
if not line.startswith("#"):
self._bufferLine = line
break
self.processHeaderLine(line)
if self._abbrev:
self.loadAbbrevFile()
def loadAbbrevFile(self):
baseName, _ = splitext(self._filename)
abbrevName = baseName + "_abrv.dsl"
if not isfile(abbrevName):
return
log.info(f"Reading abbreviation file {abbrevName!r}")
reader = Reader(self._glos)
reader.open(abbrevName)
for entry in reader:
for word in entry.l_word:
self._abbrevDict[word] = entry.defi
reader.close()
def detectEncoding(self) -> str:
for testEncoding in (
"utf-8",
"utf-16",
"utf-16-le",
"utf-16-be",
):
with compressionOpen(
self._filename,
dz=True,
mode="rt",
encoding=testEncoding,
) as fileObj:
try:
for _ in range(10):
fileObj.readline()
except (UnicodeDecodeError, UnicodeError):
log.info(f"Encoding of DSL file is not {testEncoding}")
continue
else:
log.info(f"Encoding of DSL file detected: {testEncoding}")
return testEncoding
raise ValueError(
"Could not detect encoding of DSL file"
", specify it by: --read-options encoding=ENCODING",
)
def setInfo(self, key: str, value: str) -> None:
self._glos.setInfo(key, unwrap_quotes(value))
def processHeaderLine(self, line: str) -> None:
if line.startswith("#NAME"):
self.setInfo("name", unwrap_quotes(line[6:].strip()))
elif line.startswith("#INDEX_LANGUAGE"):
self._glos.sourceLangName = unwrap_quotes(line[16:].strip())
elif line.startswith("#CONTENTS_LANGUAGE"):
self._glos.targetLangName = unwrap_quotes(line[19:].strip())
elif line.startswith("#INCLUDE"):
self.processInclude(unwrap_quotes(line[9:].strip()))
def processInclude(self, filename: str) -> None:
reader = Reader(self._glos)
reader._audio = self._audio
reader._example_color = self._example_color
with indir(self._dirPath):
reader.open(filename)
self._includes.append(reader)
def _iterLines(self) -> Iterator[str]:
if self._bufferLine:
line = self._bufferLine
self._bufferLine = ""
yield line
for line in self._file:
yield line
@staticmethod
def sub_title_line(m: "re.Match") -> str:
line = m.group(0)[1:-1]
line = line.replace("[']", "") # FIXME
line = line.replace("[/']", "")
return line # noqa: RET504
def __iter__(self) -> Iterator[EntryType]:
for reader in self._includes:
yield from reader
reader.close()
term_lines: list[str] = []
text_lines: list[str] = []
for line in self._iterLines():
if not line.strip():
continue
if line.startswith((" ", "\t")): # text
text_lines.append(line)
continue
# header or alt
if text_lines:
yield self.parseEntryBlock(term_lines, text_lines)
term_lines = []
text_lines = []
term_lines.append(line)
if text_lines:
yield self.parseEntryBlock(term_lines, text_lines)
resDir = dirname(self._filename)
for fname in sorted(self._resFileSet):
fpath = join(resDir, fname)
if not isfile(fpath):
log.warning(f"resource file not found: {fname}")
continue
with open(fpath, mode="rb") as _file:
data = _file.read()
yield self._glos.newDataEntry(fname, data)
def parseEntryBlock(
self,
term_lines: list[str],
text_lines: list[str],
) -> EntryType:
terms = []
defiTitles = []
for line in term_lines:
tr = TitleTransformer(line)
res, err = tr.transform()
if err:
log.error(err)
continue
if res is None:
log.error(f"res is None for line={line!r}")
continue
term = res.output.strip()
terms.append(term)
term2 = res.outputAlt.strip()
if term2 != term:
terms.append(term2)
title = tr.title.strip()
if title != term:
defiTitles.append("<b>" + title + "</b>")
defi = self.transform(
text="".join(text_lines),
header=terms[0],
)
if defiTitles:
defi = "<br/>".join(defiTitles + [defi])
return self._glos.newEntry(
terms,
defi,
byteProgress=(
(self._file.tell(), self._fileSize) if self._fileSize else None
),
)
# File: ilius_pyglossary/pyglossary/plugins/dsl/lex.py
from __future__ import annotations
from os.path import splitext
from typing import TYPE_CHECKING
from xml.sax.saxutils import escape, quoteattr
from pyglossary.core import log
if TYPE_CHECKING:
from ._types import ErrorType, LexType, TransformerType
__all__ = ["lexRoot"]
# rename to lexText?
def lexRoot(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.start < tr.pos:
log.warning(f"incomplete buffer near pos {tr.pos}")
if tr.end():
# if tr.openParenth > 0:
# return None, "unexpected: unclosed '('"
return None, None
c = tr.next()
if tr.end():
tr.addText(c)
return None, None
if c == "\\":
return lexBackslash, None
if c == "[":
tr.resetBuf()
return lexTag, None
if c == "]":
tr.next()
if tr.follows("["):
tr.next()
tr.output += c
tr.resetBuf()
return lexRoot, None
if c == "~":
tr.addText(tr.currentKey)
tr.resetBuf()
return lexRoot, None
if c == "\n":
return lexRootNewline, None
if c == "<" and tr.follows("<"):
tr.next()
return lexRefText, None
tr.addText(c)
tr.resetBuf()
return lexRoot, None
def lexRootNewline(tr: TransformerType) -> tuple[LexType, ErrorType]:
tr.skipAny(" \t")
if not tr.follows("[m"):
tr.addHtml("<br/>")
tr.resetBuf()
return lexRoot, None
def lexBackslash(tr: TransformerType) -> tuple[LexType, ErrorType]:
c = tr.next()
if c == " ":
tr.addHtml("&nbsp;")
elif c in "<>" and tr.follows(c):
tr.next()
tr.addText(2 * c)
else:
tr.addText(c)
tr.resetBuf()
return lexRoot, None
def lexTag(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.end():
return None, f"'[' not closed near pos {tr.pos} in lexTag"
c = tr.next()
if c == "[":
tr.output += c
tr.resetBuf()
return lexRoot, None
if c in " \t":
tr.skipAny(" \t")
return lexTagAttr, None
if c == "]":
tag = tr.input[tr.start : tr.pos - 1]
if not tag:
return None, f"empty tag near pos {tr.pos}"
return processTag(tr, tag)
# if c == '\\':
# return lexTagBackslash, None
# do not advance tr.start
return lexTag, None
def lexTagAttr(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.end():
tr.attrs[tr.attrName] = None
tr.resetBuf()
return lexRoot, None
c = tr.next()
if c == "]":
tr.attrs[tr.attrName] = None
tr.move(-1)
return lexTag, None
if c == "=":
tr.skipAny(" \t")
return lexTagAttrValue, None
tr.attrName += c
return lexTagAttr, None
def lexTagAttrValue(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.end():
return None, f"'[' not closed near pos {tr.pos} in lexTagAttrValue(1)"
c = tr.next()
quote = ""
value = ""
if c in "'\"":
if tr.end():
return None, f"'[' not closed near pos {tr.pos} in lexTagAttrValue(2)"
quote = c
else:
value += c
while True:
if tr.end():
return None, f"'[' not closed near pos {tr.pos} in lexTagAttrValue(3)"
c = tr.next()
if c == "\\":
if tr.end():
return None, f"'[' not closed near pos {tr.pos} in lexTagAttrValue(3)"
c = tr.next()
value += c
continue
if c == "]":
tr.move(-1)
break
if c == quote:
break
if not quote and c in " \t":
break
value += c
tr.attrs[tr.attrName] = value
return lexTag, None
r"""
[m{}] => <p style="padding-left:{}em;margin:0">
[*] => <span class="sec">
[ex] => <span class="ex"><font color="{exampleColor}">
[c] => <font color="green">
[p] => <i class="p"><font color="green">
['] => <u>
[b] => <b>
[i] => <i>
[u] => <u>
[sup] => <sup>
[sub] => <sub>
[ref] \
[url] } => <a href={}>{}</a>
<<...>> /
[s] => <object type="audio/x-wav" data="{}" width="40" height="40">
<param name="autoplay" value="false" />
</object>
[s] => <img align="top" src="{}" alt="{}" />
[t] => <font face="Helvetica" class="dsl_t">
{{...}} \
[trn] |
[!trn] |
[trs] } => remove
[!trs] |
[lang ...] |
[com] /
"""
def lexRefText(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.end():
return None, None
text = ""
while not tr.end():
c = tr.next()
if c == "\\":
if tr.end():
break
text += tr.next()
continue
if c == "[":
tr.move(-1)
break
if c == ">" and tr.follows(">"):
tr.next()
break
text += c
target = tr.attrs.get("target")
if not target:
target = text
tr.addHtml(f'<a href={quoteattr("bword://" + target)}>{escape(text)}</a>')
tr.resetBuf()
return lexRoot, None
def lexUrlText(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.end():
return None, None
text = ""
while not tr.end():
c = tr.next()
if c == "\\":
if tr.end():
break
text += tr.next()
continue
if c == "[":
tr.move(-1)
break
text += c
target = tr.attrs.get("target")
if not target:
target = text
if "://" not in target:
target = "http://" + target
tr.addHtml(f"<a href={quoteattr(target)}>{escape(text)}</a>")
tr.resetBuf()
return lexRoot, None
def lexTagS(tr: TransformerType) -> tuple[LexType, ErrorType]:
if tr.end():
return None, None
fname = ""
while not tr.end():
c = tr.next()
if c == "[":
tr.move(-1)
break
fname += c
_, ext = splitext(fname)
ext = ext.lstrip(".")
if ext in {"wav", "mp3"}:
if tr.audio:
tr.addHtml(
rf'<object type="audio/x-wav" data="{fname}" '
'width="40" height="40">'
'<param name="autoplay" value="false" />'
"</object>"
)
elif ext in {"jpg", "jpeg", "gif", "tif", "tiff", "png", "bmp"}:
tr.addHtml(rf'<img align="top" src="{fname}" alt="{fname}" />')
else:
log.warning(f"unknown file extension in {fname!r}")
tr.resFileSet.add(fname)
tr.resetBuf()
return lexRoot, None
def processTagM(tr: TransformerType, tag: str) -> tuple[LexType, ErrorType]:
padding = "0.3"
if len(tag) > 1:
padding = tag[1:]
if padding == "0":
padding = "0.3"
tr.addHtml(f'<p style="padding-left:{padding}em;margin:0">')
tr.resetBuf()
return lexRoot, None
def lexTagC(tr: TransformerType) -> tuple[LexType, ErrorType]:
color = "green"
for key, value in tr.attrs.items():
if value is None:
color = key
break
tr.addHtml(f'<font color="{color}">')
tr.resetBuf()
return lexRoot, None
# PLR0912 Too many branches (19 > 12)
def processTag(tr: TransformerType, tag: str) -> tuple[LexType, ErrorType]: # noqa: PLR0912
tr.attrName = ""
if not tag:
tr.resetBuf()
return lexRoot, None
if tag[0] == "/":
tr.closeTag(tag[1:])
return lexRoot, None
tag = tag.split(" ")[0]
if tag == "ref":
return lexRefText(tr)
if tag == "url":
return lexUrlText(tr)
if tag == "s":
return lexTagS(tr)
if tag == "c":
return lexTagC(tr)
if tag[0] == "m":
return processTagM(tr, tag)
if tag == "p":
tr.labelOpen = True
tr.resetBuf()
return lexRoot, None
if tag == "*":
tr.addHtml('<span class="sec">')
elif tag == "ex":
tr.addHtml(f'<span class="ex"><font color="{tr.exampleColor}">')
elif tag == "t":
tr.addHtml('<font face="Helvetica" class="dsl_t">')
elif tag == "i":
tr.addHtml("<i>")
elif tag == "b":
tr.addHtml("<b>")
elif tag == "u":
tr.addHtml("<u>")
elif tag == "'":
tr.addHtml('<u class="accent">')
elif tag == "sup":
tr.addHtml("<sup>")
elif tag == "sub":
tr.addHtml("<sub>")
elif tag in {
"trn",
"!trn",
"trs",
"!trs",
"lang",
"com",
}:
pass
else:
log.warning(f"unknown tag {tag!r}")
tr.resetBuf()
return lexRoot, None
# def lexTagBackslash(tr: TransformerType) -> tuple[LexType, ErrorType]:
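# A concrete (hypothetical) illustration of the tag mapping documented above:
# the DSL text
#   [m1][b]cat[/b] [c]noun[/c][/m]
# is rendered roughly as
#   <p style="padding-left:1em;margin:0"><b>cat</b> <font color="green">noun</font></p>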
# File: ilius_pyglossary/pyglossary/plugins/xdxf_css/__init__.py
# -*- coding: utf-8 -*-
# xdxf/__init__.py
from __future__ import annotations
"""xdxf file format reader and utils to convert xdxf to html."""
#
# Copyright © 2023 Saeed Rasooli
# Copyright © 2016 ivan tkachenko me@ratijas.tk
#
# some parts of this file include code from:
# Aard Dictionary Tools <http://aarddict.org>.
# Copyright © 2008-2009 Igor Tkach
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re
import typing
from collections.abc import Iterator, Sequence
from os.path import join
from typing import TYPE_CHECKING, cast
from pyglossary.option import BoolOption
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element
from pyglossary.option import Option
from lxml import etree as ET
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import log, rootDir
from pyglossary.io_utils import nullBinaryIO
from pyglossary.text_utils import toStr
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "xdxf_css"
format = "XdxfCss"
description = "XDXF with CSS and JS"
extensions = ()
extensionCreate = ".xdxf"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/XDXF"
website = (
"https://github.com/soshial/xdxf_makedict/tree/master/format_standard",
"XDXF standard - @soshial/xdxf_makedict",
)
optionsProp: "dict[str, Option]" = {
"html": BoolOption(comment="Entries are HTML"),
}
"""
new format
<xdxf ...>
<meta_info>
<!--All meta information about the dictionary: its title, author etc.!-->
<basename>...</basename>
<full_title>...</full_title>
<description>...</description>
</meta_info>
<lexicon>
<ar>article 1</ar>
<ar>article 2</ar>
<ar>article 3</ar>
<ar>article 4</ar>
...
</lexicon>
</xdxf>
old format
<xdxf ...>
<full_name>...</full_name>
<description>...</description>
<ar>article 1</ar>
<ar>article 2</ar>
<ar>article 3</ar>
<ar>article 4</ar>
...
</xdxf>
"""
if TYPE_CHECKING:
class TransformerType(typing.Protocol):
def transform(self, article: Element) -> str: ...
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
_html: bool = True
infoKeyMap = {
"full_name": "name",
"full_title": "name",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: "io.IOBase" = nullBinaryIO
self._encoding = "utf-8"
self._htmlTr: "TransformerType | None" = None
self._re_span_k = re.compile(
'<span class="k">[^<>]*</span>(<br/>)?',
)
self._has_added_css: bool = False
self._has_added_js: bool = False
self._abbr_defs_js: bytes
def makeTransformer(self) -> None:
from pyglossary.xdxf.css_js_transform import XdxfTransformer
self._htmlTr = XdxfTransformer(encoding=self._encoding)
def open(self, filename: str) -> None: # noqa: PLR0912
# <!DOCTYPE xdxf SYSTEM "http://xdxf.sourceforge.net/xdxf_lousy.dtd">
self._filename = filename
self.makeTransformer()
self._glos.setDefaultDefiFormat("h")
cfile = self._file = cast(
"io.IOBase",
compressionOpen(
self._filename,
mode="rb",
),
)
context = ET.iterparse( # type: ignore
cfile,
events=("end",),
)
abbr_defs = []
for _, _elem in context:
elem = cast("Element", _elem)
if elem.tag in {"meta_info", "ar", "k", "abr", "dtrn"}:
break
# every other tag before </meta_info> or </ar> is considered info
if elem.tag == "abbr_def":
abbr_defs.append(elem)
continue
# in case of multiple <from> or multiple <to> tags, the last one
# will be stored.
# Very few formats support more than one language pair in their
# metadata, so it's not very useful to have multiple
if elem.tag == "from":
for key, value in elem.attrib.items():
if key.endswith("}lang"):
self._glos.sourceLangName = value.split("-")[0]
break
continue
if elem.tag == "to":
for key, value in elem.attrib.items():
if key.endswith("}lang"):
self._glos.targetLangName = value.split("-")[0]
break
continue
if not elem.text:
log.warning(f"empty tag <{elem.tag}>")
continue
key = self.infoKeyMap.get(elem.tag, elem.tag)
self._glos.setInfo(key, elem.text)
self._abbr_defs_js = self.generate_abbr_js(abbr_defs)
del context
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("XDXF Reader: file is not seekable")
self._file.close()
self._file = compressionOpen(self._filename, mode="rb")
def __len__(self) -> int:
return 0
def __iter__(self) -> Iterator[EntryType]:
context = ET.iterparse( # type: ignore
self._file,
events=("end",),
tag="ar",
)
if self._has_added_css is False:
self._has_added_css = True
cssPath = join(rootDir, "pyglossary", "xdxf", "xdxf.css")
with open(cssPath, "rb") as css_file:
yield self._glos.newDataEntry("css/xdxf.css", css_file.read())
if self._abbr_defs_js is not None and not self._has_added_js:
self._has_added_js = True
yield self._glos.newDataEntry("js/xdxf.js", self._abbr_defs_js)
for _, _article in context:
article = cast("Element", _article)
article.tail = None
words = [toStr(w) for w in self.titles(article)]
defi = self._htmlTr.transform(article)
defiFormat = "h"
if len(words) == 1:
defi = self._re_span_k.sub("", defi)
defi = f"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="css/xdxf.css"/>
</head>
<body>
{defi}
<script type="text/javascript" src="js/xdxf.js"></script>
</body>
</html>"""
# log.info(f"{defi=}, {words=}")
yield self._glos.newEntry(
words,
defi,
defiFormat=defiFormat,
byteProgress=(self._file.tell(), self._fileSize),
)
# clean up preceding siblings to save memory
# this can reduce memory usage from 1 GB to ~25 MB
parent = article.getparent()
if parent is None:
continue
while article.getprevious() is not None:
del parent[0]
def close(self) -> None:
self._file.close()
self._file = nullBinaryIO
def generate_abbr_js(self, abbr_defs: list["Element"]) -> bytes:
abbr_map_js = """const abbr_map = new Map();\n"""
for abbr_def in abbr_defs:
abbr_k_list: list[str] = []
abbr_v_text = ""
for child in abbr_def.xpath("child::node()"):
if child.tag == "abbr_k":
abbr_k_list.append(self._htmlTr.stringify_children(child))
if child.tag == "abbr_v":
abbr_v_text = self._htmlTr.stringify_children(child)
# TODO escape apostrophes
for abbr_k in abbr_k_list:
if len(abbr_k) > 0 and len(abbr_v_text) > 0:
abbr_map_js += f"abbr_map.set('{abbr_k}', '{abbr_v_text}');\n"
with open(join(rootDir, "pyglossary", "xdxf", "xdxf.js"), "rb") as js_file:
return abbr_map_js.encode(encoding="utf-8") + js_file.read()
@staticmethod
def tostring(
elem: Element,
) -> str:
return (
ET.tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
def titles(self, article: Element) -> list[str]:
"""
:param article: <ar> tag
:return: list of title strings, including alternatives generated from <opt> elements
"""
from itertools import combinations
titles: list[str] = []
for title_element in article.findall("k"):
if title_element.text is None:
# TODO: look for <opt> tag?
log.warning(f"empty title element: {self.tostring(title_element)}")
continue
n_opts = len([c for c in title_element if c.tag == "opt"])
if n_opts:
titles += [
self._mktitle(title_element, comb)
for j in range(n_opts + 1)
for comb in combinations(list(range(n_opts)), j)
]
else:
titles.append(self._mktitle(title_element))
return titles
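# For example (hypothetical entry): <k>colo<opt>u</opt>r</k> yields both
# "color" (no optional parts) and "colour" (with the <opt> included),
# one title per combination of <opt> elements.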
def _mktitle( # noqa: PLR6301
self,
title_element: Element,
include_opts: "Sequence | None" = None,
) -> str:
if include_opts is None:
include_opts = ()
title = title_element.text or ""
opt_i = -1
for c in title_element:
if c.tag == "nu" and c.tail:
if title:
title += c.tail
else:
title = c.tail
if c.tag == "opt" and c.text is not None:
opt_i += 1
if opt_i in include_opts:
title += c.text
if c.tail:
title += c.tail
return title.strip()
# File: ilius_pyglossary/pyglossary/plugins/xdxf/__init__.py
# -*- coding: utf-8 -*-
# xdxf/__init__.py
from __future__ import annotations
"""xdxf file format reader and utils to convert xdxf to html."""
#
# Copyright © 2023 Saeed Rasooli
# Copyright © 2016 ivan tkachenko me@ratijas.tk
#
# some parts of this file include code from:
# Aard Dictionary Tools <http://aarddict.org>.
# Copyright © 2008-2009 Igor Tkach
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import re
import typing
from collections.abc import Iterator, Sequence
from typing import TYPE_CHECKING, cast
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.lxml_types import Element
from lxml import etree as ET
from pyglossary.compression import (
compressionOpen,
stdCompressions,
)
from pyglossary.core import log
from pyglossary.io_utils import nullBinaryIO
from pyglossary.option import (
BoolOption,
Option,
)
from pyglossary.text_utils import toStr
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "xdxf"
format = "Xdxf"
description = "XDXF (.xdxf)"
extensions = (".xdxf",)
extensionCreate = ".xdxf"
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/XDXF"
website = (
"https://github.com/soshial/xdxf_makedict/tree/master/format_standard",
"XDXF standard - @soshial/xdxf_makedict",
)
optionsProp: "dict[str, Option]" = {
"html": BoolOption(comment="Entries are HTML"),
"xsl": BoolOption(
comment="Use XSL transformation",
),
}
"""
new format
<xdxf ...>
<meta_info>
<!--All meta information about the dictionary: its title, author etc.!-->
<basename>...</basename>
<full_title>...</full_title>
<description>...</description>
</meta_info>
<lexicon>
<ar>article 1</ar>
<ar>article 2</ar>
<ar>article 3</ar>
<ar>article 4</ar>
...
</lexicon>
</xdxf>
old format
<xdxf ...>
<full_name>...</full_name>
<description>...</description>
<ar>article 1</ar>
<ar>article 2</ar>
<ar>article 3</ar>
<ar>article 4</ar>
...
</xdxf>
"""
if TYPE_CHECKING:
class TransformerType(typing.Protocol):
def transform(self, article: Element) -> str: ...
class Reader:
compressions = stdCompressions
depends = {
"lxml": "lxml",
}
_html: bool = True
_xsl: bool = False
infoKeyMap = {
"full_name": "name",
"full_title": "name",
}
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._filename = ""
self._file: "io.IOBase" = nullBinaryIO
self._encoding = "utf-8"
self._htmlTr: "TransformerType | None" = None
self._re_span_k = re.compile(
'<span class="k">[^<>]*</span>(<br/>)?',
)
def makeTransformer(self) -> None:
if self._xsl:
from pyglossary.xdxf.xsl_transform import XslXdxfTransformer
self._htmlTr = XslXdxfTransformer(encoding=self._encoding)
return
from pyglossary.xdxf.transform import XdxfTransformer
self._htmlTr = XdxfTransformer(encoding=self._encoding)
def open(self, filename: str) -> None: # noqa: PLR0912
# <!DOCTYPE xdxf SYSTEM "http://xdxf.sourceforge.net/xdxf_lousy.dtd">
self._filename = filename
if self._html:
self.makeTransformer()
self._glos.setDefaultDefiFormat("h")
else:
self._glos.setDefaultDefiFormat("x")
cfile = self._file = cast(
"io.IOBase",
compressionOpen(
self._filename,
mode="rb",
),
)
context = ET.iterparse( # type: ignore
cfile,
events=("end",),
)
for _, _elem in context:
elem = cast("Element", _elem)
if elem.tag in {"meta_info", "ar", "k", "abr", "dtrn"}:
break
# every other tag before </meta_info> or </ar> is considered info
if elem.tag == "abbr_def":
continue
# in case of multiple <from> or multiple <to> tags, the last one
# will be stored.
# Very few formats support more than one language pair in their
# metadata, so it's not very useful to have multiple
if elem.tag == "from":
for key, value in elem.attrib.items():
if key.endswith("}lang"):
self._glos.sourceLangName = value.split("-")[0]
break
continue
if elem.tag == "to":
for key, value in elem.attrib.items():
if key.endswith("}lang"):
self._glos.targetLangName = value.split("-")[0]
break
continue
if not elem.text:
log.warning(f"empty tag <{elem.tag}>")
continue
key = self.infoKeyMap.get(elem.tag, elem.tag)
self._glos.setInfo(key, elem.text)
del context
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("XDXF Reader: file is not seekable")
self._file.close()
self._file = compressionOpen(self._filename, mode="rb")
def __len__(self) -> int:
return 0
def __iter__(self) -> Iterator[EntryType]:
context = ET.iterparse( # type: ignore
self._file,
events=("end",),
tag="ar",
)
for _, _article in context:
article = cast("Element", _article)
article.tail = None
words = [toStr(w) for w in self.titles(article)]
if self._htmlTr:
defi = self._htmlTr.transform(article)
defiFormat = "h"
if len(words) == 1:
defi = self._re_span_k.sub("", defi)
else:
b_defi = cast(bytes, ET.tostring(article, encoding=self._encoding))
defi = b_defi[4:-5].decode(self._encoding).strip()
defiFormat = "x"
# log.info(f"{defi=}, {words=}")
yield self._glos.newEntry(
words,
defi,
defiFormat=defiFormat,
byteProgress=(self._file.tell(), self._fileSize),
)
# clean up preceding siblings to save memory
# this can reduce memory usage from 1 GB to ~25 MB
parent = article.getparent()
if parent is None:
continue
while article.getprevious() is not None:
del parent[0]
def close(self) -> None:
self._file.close()
self._file = nullBinaryIO
@staticmethod
def tostring(
elem: Element,
) -> str:
return (
ET.tostring(
elem,
method="html",
pretty_print=True,
)
.decode("utf-8")
.strip()
)
def titles(self, article: Element) -> list[str]:
"""
:param article: <ar> tag
:return: list of title strings, including alternatives generated from <opt> elements
"""
from itertools import combinations
titles: list[str] = []
for title_element in article.findall("k"):
if title_element.text is None:
# TODO: look for <opt> tag?
log.warning(f"empty title element: {self.tostring(title_element)}")
continue
n_opts = len([c for c in title_element if c.tag == "opt"])
if n_opts:
titles += [
self._mktitle(title_element, comb)
for j in range(n_opts + 1)
for comb in combinations(list(range(n_opts)), j)
]
else:
titles.append(self._mktitle(title_element))
return titles
def _mktitle( # noqa: PLR6301
self,
title_element: Element,
include_opts: "Sequence | None" = None,
) -> str:
if include_opts is None:
include_opts = ()
title = title_element.text or ""
opt_i = -1
for c in title_element:
if c.tag == "nu" and c.tail:
if title:
title += c.tail
else:
title = c.tail
if c.tag == "opt" and c.text is not None:
opt_i += 1
if opt_i in include_opts:
title += c.text
if c.tail:
title += c.tail
return title.strip()
# File: ilius_pyglossary/pyglossary/plugins/octopus_mdict_new/__init__.py
# -*- coding: utf-8 -*-
# Read Octopus MDict dictionary format, mdx(dictionary)/mdd(data)
#
# Copyright © 2013 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
# Copyright © 2013-2021 Saeed Rasooli <saeed.gnu@gmail.com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import gc
import os
import re
import sys
from collections.abc import Iterator
from os.path import dirname, extsep, isfile, join, splitext
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
from pyglossary.plugin_lib.readmdict import MDD, MDX
from pyglossary.core import log
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
from pyglossary.text_utils import toStr
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "octopus_mdict"
format = "OctopusMdict"
description = "Octopus MDict (.mdx)"
extensions = (".mdx",)
extensionCreate = ""
singleFile = False
kind = "binary"
wiki = ""
website = (
"https://www.mdict.cn/wp/?page_id=5325&lang=en",
"Download | MDict.cn",
)
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"substyle": BoolOption(
comment="Enable substyle",
),
"same_dir_data_files": BoolOption(
comment="Read data files from same directory",
),
"audio": BoolOption(
comment="Enable audio objects",
),
}
extraDocs = [
(
"`python-lzo` is required for **some** MDX glossaries.",
"""First try converting your MDX file, if failed (`AssertionError` probably),
then try to install [LZO library and Python binding](./doc/lzo.md).""",
),
]
class Reader:
_encoding: str = ""
_substyle: bool = True
_same_dir_data_files: bool = False
_audio: bool = False
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self.clear()
self._re_internal_link = re.compile("href=([\"'])(entry://|[dx]:)")
self._re_audio_link = re.compile(
'<a (type="sound" )?([^<>]*? )?href="sound://([^<>"]+)"( .*?)?>(.*?)</a>',
)
def clear(self) -> None:
self._filename = ""
self._mdx: "MDX | None" = None
self._mdd: list[MDD] = []
self._wordCount = 0
self._dataEntryCount = 0
# dict of mainWord -> newline-separated alternatives
self._linksDict: "dict[str, str]" = {}
def open(self, filename: str) -> None:
from pyglossary.plugin_lib.readmdict import MDD, MDX
self._filename = filename
self._mdx = MDX(filename, self._encoding, self._substyle)
"""
multiple MDD files are supported with this naming schema:
FILE.mdx
FILE.mdd
FILE.1.mdd
FILE.2.mdd
FILE.3.mdd
"""
filenameNoExt, _ext = splitext(self._filename)
mddBase = filenameNoExt + extsep
for fname in (f"{mddBase}mdd", f"{mddBase}1.mdd"):
if isfile(fname):
self._mdd.append(MDD(fname))
mddN = 2
while isfile(f"{mddBase}{mddN}.mdd"):
self._mdd.append(MDD(f"{mddBase}{mddN}.mdd"))
mddN += 1
dataEntryCount = 0
for mdd in self._mdd:
dataEntryCount += len(mdd)
self._dataEntryCount = dataEntryCount
log.info(f"Found {len(self._mdd)} mdd files with {dataEntryCount} entries")
# from pprint import pformat
# log.debug("mdx.header = " + pformat(self._mdx.header))
# for key, value in self._mdx.header.items():
# key = key.lower()
# self._glos.setInfo(key, value)
try:
title = toStr(self._mdx.header[b"Title"])
except KeyError:
pass
else:
title = title.strip()
if title:
self._glos.setInfo("name", title)
desc = toStr(self._mdx.header.get(b"Description", ""))
if desc:
self._glos.setInfo("description", desc)
self.loadLinks()
def loadLinks(self) -> None:
from pyglossary.plugin_lib.readmdict import MDX
mdx = self._mdx
if mdx is None:
raise ValueError("mdx is None")
log.info("extracting links...")
linksDict: "dict[str, str]" = {}
word = ""
wordCount = 0
for b_word, b_defi in mdx.items():
word = b_word.decode("utf-8")
defi = b_defi.decode("utf-8").strip()
if defi.startswith("@@@LINK="):
if not word:
log.warning(f"unexpected defi: {defi}")
continue
mainWord = defi[8:]
if mainWord in linksDict:
linksDict[mainWord] += "\n" + word
else:
linksDict[mainWord] = word
continue
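# e.g. (hypothetical) an entry "colour" whose definition is "@@@LINK=color"
# is recorded here as an alternative headword of the main entry "color"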
wordCount += 1
log.info(
f"extracting links done, sizeof(linksDict)={sys.getsizeof(linksDict)}",
)
log.info(f"{wordCount = }")
self._linksDict = linksDict
self._wordCount = wordCount
self._mdx = MDX(self._filename, self._encoding, self._substyle)
def fixDefi(self, defi: str) -> str:
defi = self._re_internal_link.sub(r"href=\1bword://", defi)
defi = defi.replace(' src="file://', ' src=".')
if self._audio:
# \5 is the possible elements between <a ...> and </a>
# but anything between <audio...> and </audio> is completely
# ignored by Aard2 Web and the browser
# and there is no point adding it after </audio>
# which makes it shown after audio controls
# GoldenDict acts completely different, so must use
# audio_goldendict=True option in StarDict writer instead.
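# e.g. '<a href="sound://apple.mp3">listen</a>' becomes
# '<audio controls src="apple.mp3"></audio>'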
defi = self._re_audio_link.sub(
r'<audio controls src="\3"></audio>',
defi,
)
return defi
def __iter__(self) -> Iterator[EntryType]:
if self._mdx is None:
log.error("trying to iterate on a closed MDX file")
return
glos = self._glos
linksDict = self._linksDict
for b_word, b_defi in self._mdx.items():
word = b_word.decode("utf-8")
defi = b_defi.decode("utf-8").strip()
if defi.startswith("@@@LINK="):
continue
defi = self.fixDefi(defi)
words = word
altsStr = linksDict.get(word, "")
if altsStr:
words = [word] + altsStr.split("\n")
yield glos.newEntry(words, defi)
self._mdx = None
del linksDict
self._linksDict = {}
gc.collect()
if self._same_dir_data_files:
dirPath = dirname(self._filename)
for fname in os.listdir(dirPath):
ext = splitext(fname)[1].lower()
if ext in {".mdx", ".mdd"}:
continue
fpath = join(dirPath, fname)
if not isfile(fpath):
continue
with open(fpath, mode="rb") as _file:
b_data = _file.read()
yield glos.newDataEntry(fname, b_data)
for mdd in self._mdd:
try:
for b_fname, b_data in mdd.items():
fname = toStr(b_fname)
fname = fname.replace("\\", os.sep).lstrip(os.sep)
yield glos.newDataEntry(fname, b_data)
except Exception: # noqa: PERF203
log.exception(f"Error reading {mdd.filename}")
self._mdd = []
def __len__(self) -> int:
return self._wordCount + self._dataEntryCount
def close(self) -> None:
self.clear()
| 7,169 | Python | .py | 236 | 27.305085 | 79 | 0.685035 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,866 | _dict.py | ilius_pyglossary/pyglossary/plugins/appledict/_dict.py |
# -*- coding: utf-8 -*-
# appledict/_dict.py
# Output to Apple Dictionary xml sources for Dictionary Development Kit.
#
# Copyright © 2016-2019 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2016 ivan tkachenko me@ratijas.tk
# Copyright © 2012-2015 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import logging
import string
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Callable, Iterator
from typing import Any
from . import _normalize
__all__ = ["_normalize", "id_generator", "indexes_generator", "quote_string"]
log = logging.getLogger("pyglossary")
digs = string.digits + string.ascii_letters
def base36(x: int) -> str:
"""
Simplified version of int2base
http://stackoverflow.com/questions/2267362/convert-integer-to-a-string-in-a-given-numeric-base-in-python#2267446.
"""
digits = []
while x:
digits.append(digs[x % 36])
x //= 36
digits.reverse()
return "".join(digits)
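# For example (values follow from the digit table above):
#   base36(1)  -> "1"
#   base36(36) -> "10"
#   base36(0)  -> ""  (zero yields the empty string)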
def id_generator() -> Iterator[str]:
cnt = 1
while True:
yield "_" + str(base36(cnt))
cnt += 1
def quote_string(value: str, BeautifulSoup: Any) -> str:
if BeautifulSoup:
return BeautifulSoup.dammit.EntitySubstitution.substitute_xml(
value,
make_quoted_attribute=True,
)
return '"' + value.replace(">", "&gt;").replace('"', "&quot;") + '"'
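# For example, when BeautifulSoup is unavailable (the fallback branch above):
#   quote_string('rock "n" roll', None) -> '"rock &quot;n&quot; roll"'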
def indexes_generator(
indexes_lang: str,
) -> """Callable[
[str, list[str], str, Any],
str,
]""":
"""Generate indexes according to glossary language."""
indexer = None
"""Callable[[Sequence[str], str], Sequence[str]]"""
if indexes_lang:
from . import indexes as idxs
indexer = idxs.languages.get(indexes_lang, None)
if not indexer:
keys_str = ", ".join(idxs.languages)
msg = (
"extended indexes not supported for the"
f" specified language: {indexes_lang}.\n"
f"following languages available: {keys_str}."
)
log.error(msg)
raise ValueError(msg)
def generate_indexes(
title: str,
alts: list[str],
content: str,
BeautifulSoup: Any,
) -> str:
indexes = [title]
indexes.extend(alts)
quoted_title = quote_string(title, BeautifulSoup)
if indexer:
indexes = list(set(indexer(indexes, content)))
normal_indexes = set()
for idx in indexes:
normal = _normalize.title(idx, BeautifulSoup)
normal_indexes.add(_normalize.title_long(normal))
normal_indexes.add(_normalize.title_short(normal))
normal_indexes.discard(title)
s = f"<d:index d:value={quoted_title} d:title={quoted_title}/>"
for idx in normal_indexes:
if not idx.strip():
# skip empty titles. everything could happen.
continue
quoted_idx = quote_string(idx, BeautifulSoup)
s += f"<d:index d:value={quoted_idx} d:title={quoted_title}/>"
return s
return generate_indexes
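# Rough usage sketch (this mirrors how Writer.write in __init__.py calls the
# returned function; the values are illustrative):
#
# generate_indexes = indexes_generator("")  # no language-specific indexer
# generate_indexes("word", ["alt"], "<p>defi</p>", None)
# # -> '<d:index d:value="word" d:title="word"/><d:index d:value="alt" d:title="word"/>'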
| 3,358 | Python | .py | 100 | 31 | 114 | 0.729889 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,867 | _content.py | ilius_pyglossary/pyglossary/plugins/appledict/_content.py |
# -*- coding: utf-8 -*-
#
# Copyright © 2016-2019 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2016 ivan tkachenko me@ratijas.tk
# Copyright © 2012-2015 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# FIXME:
# MDX-specific parts should be isolated and moved to MDX Reader
# and parts that are specific to one glossary
# (like Oxford_Advanced_English-Chinese_Dictionary_9th_Edition.mdx)
# should be moved to separate modules (like content processors) and enabled
# per-glossary (by title or something else)
from __future__ import annotations
import logging
import re
from typing import Any
from xml.sax.saxutils import quoteattr, unescape
from pyglossary.text_utils import toStr
__all__ = ["prepare_content"]
log = logging.getLogger("pyglossary")
re_brhr = re.compile("<(BR|HR)>", re.IGNORECASE)
re_nonprintable = re.compile("[\x00-\x07\x0e-\x1f]")
re_img = re.compile("<IMG (.*?)>", re.IGNORECASE)
re_div_margin_em = re.compile(r'<div style="margin-left:(\d)em">')
sub_div_margin_em = r'<div class="m\1">'
re_div_margin_em_ex = re.compile(
r'<div class="ex" style="margin-left:(\d)em;color:steelblue">',
)
sub_div_margin_em_ex = r'<div class="m\1 ex">'
re_href = re.compile(r"""href=(["'])(.*?)\1""")
re_margin = re.compile(r"margin-left:(\d)em")
def prepare_content(
title: "str | None",
body: str,
BeautifulSoup: Any,
) -> str:
# heavily integrated with output of dsl reader plugin!
# and with xdxf also.
""":param title: str | None"""
# class="sec" => d:priority="2"
# style="color:steelblue" => class="ex"
# class="p" style="color:green" => class="p"
# style="color:green" => class="c"
# style="margin-left:{}em" => class="m{}"
# <s> => <del>
# xhtml is strict
if BeautifulSoup:
content = prepare_content_with_soup(title, body, BeautifulSoup)
else:
content = prepare_content_without_soup(title, body)
content = content.replace("&nbsp;", "&#160;")  # "&nbsp;" is not defined in strict XHTML
content = re_nonprintable.sub("", content)
return content # noqa: RET504
def prepare_content_without_soup(
title: "str | None",
body: str,
) -> str:
# somewhat analogous to what BeautifulSoup is supposed to do
body = re_div_margin_em.sub(sub_div_margin_em, body)
body = re_div_margin_em_ex.sub(sub_div_margin_em_ex, body)
body = re_href.sub(href_sub, body)
body = (
body.replace(
'<i style="color:green">',
'<i class="c">',
)
.replace(
'<i class="p" style="color:green">',
'<i class="p">',
)
.replace(
'<span class="ex" style="color:steelblue">',
'<span class="ex">',
)
.replace(
'<span class="sec ex" style="color:steelblue">',
'<span class="sec ex">',
)
.replace("<u>", '<span class="u">')
.replace("</u>", "</span>")
.replace("<s>", "<del>")
.replace("</s>", "</del>")
)
# nice header to display
content = f"<h1>{title}</h1>{body}" if title else body
content = re_brhr.sub(r"<\g<1> />", content)
content = re_img.sub(r"<img \g<1>/>", content)
return content # noqa: RET504
def _prepare_href(tag) -> None:
href = tag["href"]
href = cleanup_link_target(href)
if href.startswith("sound:"):
fix_sound_link(href, tag)
elif href.startswith(("phonetics", "help:phonetics")):
# for oxford9
log.debug(f"phonetics: {tag=}")
if tag.audio and "name" in tag.audio.attrs:
tag["onmousedown"] = "this.lastChild.play(); return false;"
src_name = tag.audio["name"].replace("#", "_")
tag.audio["src"] = f"{src_name}.mp3"
elif not link_is_url(href):
tag["href"] = f"x-dictionary:d:{href}"
def _prepare_onclick(soup) -> None:
for thumb in soup.find_all("div", "pic_thumb"):
thumb["onclick"] = (
'this.setAttribute("style", "display:none"); '
'this.nextElementSibling.setAttribute("style", "display:block")'
)
for pic in soup.find_all("div", "big_pic"):
pic["onclick"] = (
'this.setAttribute("style", "display:none"), '
'this.previousElementSibling.setAttribute("style", "display:block")'
)
# to unfold(expand) and fold(collapse) blocks
for pos in soup.find_all("pos", onclick="toggle_infl(this)"):
# TODO: simplify this!
pos["onclick"] = (
r"var e = this.parentElement.parentElement.parentElement"
r'.querySelector("res-g vp-gs"); style = window.'
r"getComputedStyle(e), display = style.getPropertyValue"
r'("display"), "none" === e.style.display || "none" === display'
r' ? e.style.display = "block" : e.style.display = "none", '
r"this.className.match(/(?:^|\s)Clicked(?!\S)/) ? this."
r"className = this.className.replace("
r'/(?:^|\s)Clicked(?!\S)/g, "") : this.setAttribute('
r'"class", "Clicked")'
)
# TODO: PLR0912 Too many branches (18 > 12)
def prepare_content_with_soup( # noqa: PLR0912
title: "str | None",
body: str,
BeautifulSoup: Any,
) -> str:
soup = BeautifulSoup.BeautifulSoup(body, features="lxml")
# difference between "lxml" and "html.parser"
if soup.body:
soup = soup.body
for tag in soup(class_="sec"):
tag["class"].remove("sec")
if not tag["class"]:
del tag["class"]
tag["d:priority"] = "2"
for tag in soup(lambda x: "color:steelblue" in x.get("style", "")):
remove_style(tag, "color:steelblue")
if "ex" not in tag.get("class", []):
tag["class"] = tag.get("class", []) + ["ex"]
for tag in soup(is_green):
remove_style(tag, "color:green")
if "p" not in tag.get("class", ""):
tag["class"] = tag.get("class", []) + ["c"]
for tag in soup(True):
if "style" in tag.attrs:
m = re_margin.search(tag["style"])
if m:
remove_style(tag, m.group(0))
tag["class"] = tag.get("class", []) + ["m" + m.group(1)]
for tag in soup(lambda x: "xhtml:" in x.name):
old_tag_name = tag.name
tag.name = old_tag_name[len("xhtml:") :]
if tag.string:
tag.string = f"{tag.string} "
for tag in soup.select("[href]"):
_prepare_href(tag)
_prepare_onclick(soup)
for tag in soup.select("[src]"):
src = tag["src"]
if src.startswith("/"):
tag["src"] = src[1:]
for tag in soup("u"):
tag.name = "span"
tag["class"] = tag.get("class", []) + ["u"]
for tag in soup("s"):
tag.name = "del"
if title and "<h" not in body:
h1 = BeautifulSoup.Tag(name="h1")
h1.string = title
soup.insert(0, h1)
# hence the name BeautifulSoup
# soup.insert(0, head)
return toStr(soup.encode_contents())
def cleanup_link_target(href: str) -> str:
return href.removeprefix("bword://")
def href_sub(x: "re.Match") -> str:
href = x.groups()[1]
if href.startswith("http"):
return x.group()
href = cleanup_link_target(href)
return "href=" + quoteattr(
"x-dictionary:d:"
+ unescape(
href,
{"&quot;": '"'},
),
)
def is_green(x: dict) -> bool:
return "color:green" in x.get("style", "")
def remove_style(tag: dict, line: str) -> None:
s = "".join(tag["style"].replace(line, "").split(";"))
if s:
tag["style"] = s
else:
del tag["style"]
def fix_sound_link(href: str, tag: "dict[str, Any]") -> None:
tag["href"] = f'javascript:new Audio("{href[len("sound://"):]}").play();'
def link_is_url(href: str) -> bool:
for prefix in (
"http:",
"https:",
"addexample:",
"addid:",
"addpv:",
"help:",
"helpg:",
"helpp:",
"helpr:",
"helpxr:",
"xi:",
"xid:",
"xp:",
"sd:",
"#",
):
if href.startswith(prefix):
return True
return False
| 7,780 | Python | .py | 236 | 30.360169 | 75 | 0.658387 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,868 | __init__.py | ilius_pyglossary/pyglossary/plugins/appledict/__init__.py |
# -*- coding: utf-8 -*-
#
# Output to Apple Dictionary xml sources for Dictionary Development Kit.
#
# Copyright © 2016-2023 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2016 ivan tkachenko <me@ratijas.tk>
# Copyright © 2012-2015 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import os
import pkgutil
import shutil
import sys
from os.path import basename, isdir, join
from typing import TYPE_CHECKING, Any
from pyglossary.core import log, pip
from pyglossary.option import (
BoolOption,
DictOption,
Option,
StrOption,
)
from pyglossary.text_utils import toStr
from ._content import prepare_content
from ._dict import (
_normalize,
id_generator,
indexes_generator,
quote_string,
)
if TYPE_CHECKING:
import io
from collections.abc import Generator
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Writer",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
sys.setrecursionlimit(10000)
enable = True
lname = "appledict"
format = "AppleDict"
description = "AppleDict Source"
extensions = (".apple",)
extensionCreate = ".apple/"
singleFile = False
kind = "directory"
wiki = ""
website = (
"https://support.apple.com/en-gu/guide/dictionary/welcome/mac",
"Dictionary User Guide for Mac",
)
# FIXME: rename indexes arg/option to indexes_lang?
optionsProp: "dict[str, Option]" = {
"clean_html": BoolOption(comment="use BeautifulSoup parser"),
"css": StrOption(
comment="custom .css file path",
),
"xsl": StrOption(
comment="custom XSL transformations file path",
),
"default_prefs": DictOption(
comment="default prefs in python dict format",
# example: {"key": "value", "version": "1"}
),
"prefs_html": StrOption(
comment="preferences XHTML file path",
),
"front_back_matter": StrOption(
comment="XML file path with top-level tag",
),
"jing": BoolOption(comment="run Jing check on generated XML"),
"indexes": StrOption(
customValue=False,
values=["", "ru", "zh"],
comment="Additional indexes to dictionary entries",
),
}
extraDocs = [
(
"Also see:",
"See [doc/apple.md](./doc/apple.md) for additional AppleDict instructions.",
),
]
BeautifulSoup = None
def loadBeautifulSoup() -> None:
global BeautifulSoup
try:
import bs4 as BeautifulSoup
except ImportError:
try:
import BeautifulSoup # type: ignore
except ImportError:
return
_version: str = BeautifulSoup.__version__ # type: ignore
if int(_version.split(".")[0]) < 4:
raise ImportError(
"BeautifulSoup is too old, required at least version 4, "
f"{_version!r} found.\n"
f"Please run `{pip} install lxml beautifulsoup4 html5lib`",
)
def abspath_or_None(path: "str | None") -> str | None:
if not path:
return None
return os.path.abspath(os.path.expanduser(path))
def write_header(
toFile: "io.TextIOBase",
front_back_matter: "str | None",
) -> None:
# write header
toFile.write(
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<d:dictionary xmlns="http://www.w3.org/1999/xhtml" '
'xmlns:d="http://www.apple.com/DTDs/DictionaryService-1.0.rng">\n',
)
if front_back_matter:
with open(
front_back_matter,
encoding="utf-8",
) as _file:
toFile.write(_file.read())
def format_default_prefs(default_prefs: "dict[str, Any] | None") -> str:
"""
:type default_prefs: dict or None
as of 14 Jan 2016, it is highly recommended that prefs contain
{"version": "1"}, otherwise Dictionary.app does not keep user changes
between restarts.
"""
if not default_prefs:
return ""
if not isinstance(default_prefs, dict):
raise TypeError(f"default_prefs not a dictionary: {default_prefs!r}")
if str(default_prefs.get("version", None)) != "1":
log.error(
"default prefs does not contain {'version': '1'}. prefs "
"will not be persistent between Dictionary.app restarts.",
)
return "\n".join(
f"\t\t<key>{key}</key>\n\t\t<string>{value}</string>"
for key, value in sorted(default_prefs.items())
).strip()
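# For example (illustrative dict; keys are sorted in the output):
#   format_default_prefs({"version": "1", "display": "large"}) ->
#   '<key>display</key>\n\t\t<string>large</string>\n\t\t<key>version</key>\n\t\t<string>1</string>'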
def write_css(fname: str, css_file: str) -> None:
with open(fname, mode="wb") as toFile:
if css_file:
with open(css_file, mode="rb") as fromFile:
toFile.write(fromFile.read())
else:
data = pkgutil.get_data(
__name__,
"templates/Dictionary.css",
)
if data is None:
raise RuntimeError("failed to load templates/Dictionary.css")
toFile.write(data)
"""
write glossary to Apple dictionary .xml and supporting files.
:param dirname: directory path, must not have extension
:param clean_html: pass True to use BeautifulSoup parser.
:param css: path to custom .css file
:param xsl: path to custom XSL transformations file.
:param default_prefs: Default prefs in python dictionary literal format,
i.e. {"key1": "value1", "key2": "value2", ...}. All keys and values
must be quoted strings; disallowed characters (e.g. single/double
quotes, equal sign "=", semicolon) must be escaped as hex code
according to python string literal rules.
:param prefs_html: path to XHTML file with user interface for
dictionary's preferences. refer to Apple's documentation for details.
:param front_back_matter: path to XML file with top-level tag
<d:entry id="front_back_matter" d:title="Your Front/Back Matter Title">
your front/back matter entry content
</d:entry>
:param jing: pass True to run Jing check on generated XML.
# FIXME: rename to indexes_lang?
:param indexes: Dictionary.app is not very smart and by default does not
know how to perform flexible search. We can help it by manually providing
additional indexes to dictionary entries.
"""
class Writer:
depends = {
"lxml": "lxml",
"bs4": "beautifulsoup4",
"html5lib": "html5lib",
}
_clean_html: bool = True
_css: str = ""
_xsl: str = ""
_default_prefs: "dict | None" = None
_prefs_html: str = ""
_front_back_matter: str = ""
_jing: bool = False
_indexes: str = "" # FIXME: rename to indexes_lang?
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self._dirname = ""
def finish(self) -> None:
self._dirname = ""
def open(self, dirname: str) -> None:
self._dirname = dirname
if not isdir(dirname):
os.mkdir(dirname)
def write(self) -> Generator[None, EntryType, None]: # noqa: PLR0912
global BeautifulSoup
from pyglossary.xdxf.transform import XdxfTransformer
glos = self._glos
clean_html = self._clean_html
css: "str | None" = self._css
xsl: "str | None" = self._xsl
default_prefs = self._default_prefs
prefs_html: "str | None" = self._prefs_html
front_back_matter: "str | None" = self._front_back_matter
jing = self._jing
indexes = self._indexes
xdxf_to_html = XdxfTransformer(encoding="utf-8")
if clean_html:
if BeautifulSoup is None:
loadBeautifulSoup()
if BeautifulSoup is None:
log.warning(
"clean_html option passed but BeautifulSoup not found. "
"to fix this run "
f"`{pip} install lxml beautifulsoup4 html5lib`",
)
else:
BeautifulSoup = None
dirname = self._dirname
fileNameBase = basename(dirname).replace(".", "_")
filePathBase = join(dirname, fileNameBase)
# before chdir (outside indir block)
css = abspath_or_None(css)
xsl = abspath_or_None(xsl)
prefs_html = abspath_or_None(prefs_html)
front_back_matter = abspath_or_None(front_back_matter)
generate_id = id_generator()
generate_indexes = indexes_generator(indexes)
myResDir = join(dirname, "OtherResources")
if not isdir(myResDir):
os.mkdir(myResDir)
with open(filePathBase + ".xml", mode="w", encoding="utf-8") as toFile:
write_header(toFile, front_back_matter)
while True:
entry = yield
if entry is None:
break
if entry.isData():
entry.save(myResDir)
continue
words = entry.l_word
word, alts = words[0], words[1:]
defi = entry.defi
long_title = _normalize.title_long(
_normalize.title(word, BeautifulSoup),
)
if not long_title:
continue
_id = next(generate_id)
quoted_title = quote_string(long_title, BeautifulSoup)
content_title: "str | None" = long_title
if entry.defiFormat == "x":
defi = xdxf_to_html.transformByInnerString(defi)
content_title = None
content = prepare_content(content_title, defi, BeautifulSoup)
toFile.write(
f'<d:entry id="{_id}" d:title={quoted_title}>\n'
+ generate_indexes(long_title, alts, content, BeautifulSoup)
+ content
+ "\n</d:entry>\n",
)
toFile.write("</d:dictionary>\n")
if xsl:
shutil.copy(xsl, myResDir)
if prefs_html:
shutil.copy(prefs_html, myResDir)
write_css(filePathBase + ".css", css)
with open(join(dirname, "Makefile"), mode="w", encoding="utf-8") as toFile:
toFile.write(
toStr(
pkgutil.get_data(
__name__,
"templates/Makefile",
),
).format(dict_name=fileNameBase),
)
_copyright = glos.getInfo("copyright")
if BeautifulSoup:
# strip html tags
_copyright = str(
BeautifulSoup.BeautifulSoup(
_copyright,
features="lxml",
).text,
)
# if DCSDictionaryXSL provided but DCSDictionaryDefaultPrefs <dict/> not
# present in Info.plist, Dictionary.app will crash.
with open(filePathBase + ".plist", mode="w", encoding="utf-8") as toFile:
frontMatterReferenceID = (
"<key>DCSDictionaryFrontMatterReferenceID</key>\n"
"\t<string>front_back_matter</string>"
if front_back_matter
else ""
)
bundle_id = glos.getInfo("CFBundleIdentifier")
if not bundle_id:
bundle_id = fileNameBase.replace(" ", "")
toFile.write(
toStr(
pkgutil.get_data(
__name__,
"templates/Info.plist",
),
).format(
# identifier must be unique
CFBundleIdentifier=bundle_id,
CFBundleDisplayName=glos.getInfo("name"),
CFBundleName=fileNameBase,
DCSDictionaryCopyright=_copyright,
DCSDictionaryManufacturerName=glos.author,
DCSDictionaryXSL=basename(xsl) if xsl else "",
DCSDictionaryDefaultPrefs=format_default_prefs(default_prefs),
DCSDictionaryPrefsHTML=basename(prefs_html) if prefs_html else "",
DCSDictionaryFrontMatterReferenceID=frontMatterReferenceID,
),
)
if jing:
from .jing import run as jing_run
jing_run(filePathBase + ".xml")
| 10,857 | Python | .py | 346 | 28.283237 | 78 | 0.714026 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,869 | _normalize.py | ilius_pyglossary/pyglossary/plugins/appledict/_normalize.py |
# -*- coding: utf-8 -*-
# appledict/_normalize.py
# Output to Apple Dictionary xml sources for Dictionary Development Kit.
#
# Copyright © 2016-2019 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2016 ivan tkachenko me@ratijas.tk
# Copyright © 2012-2015 Xiaoqiang Wang <xiaoqiangwang AT gmail DOT com>
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
import re
from typing import Any
re_spaces = re.compile(r"[ \t\n]{2,}")
re_title = re.compile('<[^<]+?>|"|[<>]|\xef\xbb\xbf')
re_title_short = re.compile(r"\[.*?\]")
re_whitespace = re.compile("(\t|\n|\r)")
# FIXME: rename all/most functions here, add a 'fix_' prefix
def spaces(s: str) -> str:
"""
Strip off leading and trailing whitespaces and
replace contiguous whitespaces with just one space.
"""
return re_spaces.sub(" ", s.strip())
_brackets_sub = (
(
re.compile(r"( *)\{( *)\\\[( *)"), # { \[
r"\1\2\3[",
),
(
re.compile(r"( *)\\\]( *)\}( *)"), # \] }
r"]\1\2\3",
),
(
re.compile(r"( *)\{( *)\(( *)\}( *)"), # { ( }
r"\1\2\3\4[",
),
(
re.compile(r"( *)\{( *)\)( *)\}( *)"), # { ) }
r"]\1\2\3\4",
),
(
re.compile(r"( *)\{( *)\(( *)"), # { (
r"\1\2\3[",
),
(
re.compile(r"( *)\)( *)\}( *)"), # ) }
r"]\1\2\3",
),
(
re.compile(r"( *)\{( *)"), # {
r"\1\2[",
),
(
re.compile(r"( *)\}( *)"), # }
r"]\1\2",
),
(
re.compile(r"{.*?}"),
r"",
),
)
def brackets(s: str) -> str:
r"""
Replace all crazy brackets with square ones [].
The following combinations are replaced:
{ \[ ... \] }
{ ( } ... { ) }
{ ( ... ) }
{ ... }
"""
if "{" in s:
for exp, sub in _brackets_sub:
s = exp.sub(sub, s)
return spaces(s)
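# For example (illustrative input):
#   brackets("foo {(bar)} baz") -> "foo [bar] baz"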
def truncate(text: str, length: int = 449) -> str:
"""
Truncate a string to the given length.
:param str text:
:return: truncated text
:rtype: str.
"""
content = re_whitespace.sub(" ", text)
if len(text) > length:
# find the next space after max_len chars (do not break inside a word)
pos = content[:length].rfind(" ")
if pos == -1:
pos = length
text = text[:pos]
return text # noqa: RET504
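# For example (cuts back to the last space within the limit):
#   truncate("alpha beta gamma", 10) -> "alpha"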
def title(title: str, BeautifulSoup: Any) -> str:
"""Strip double quotes and html tags."""
if BeautifulSoup:
title = title.replace("\xef\xbb\xbf", "")
if len(title) > 1:
# BeautifulSoup has a bug when markup <= 1 char length
title = BeautifulSoup.BeautifulSoup(
title,
features="lxml",
# FIXME: html or lxml? gives warning unless it's lxml
).get_text(strip=True)
else:
title = re_title.sub("", title)
title = title.replace("&", "&amp;")
title = brackets(title)
title = truncate(title, 1126)
return title # noqa: RET504
def title_long(s: str) -> str:
"""
Return long title line.
Example:
-------
title_long("str[ing]") -> string.
"""
return s.replace("[", "").replace("]", "")
def title_short(s: str) -> str:
"""
Return short title line.
Example:
-------
title_short("str[ing]") -> str.
"""
return spaces(re_title_short.sub("", s))
| 3,534 | Python | .py | 132 | 24.469697 | 72 | 0.615931 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,870 | __main__.py | ilius_pyglossary/pyglossary/plugins/appledict/jing/__main__.py |
"""main entry point."""
import logging
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__))) # noqa: E402
from . import main
log = logging.getLogger("root")
console_output_handler = logging.StreamHandler(sys.stderr)
console_output_handler.setFormatter(
logging.Formatter(
"%(asctime)s: %(message)s",
),
)
log.addHandler(console_output_handler)
log.setLevel(logging.INFO)
main.main()
| 420 | Python | .py | 16 | 24.6875 | 73 | 0.77193 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,871 | __init__.py | ilius_pyglossary/pyglossary/plugins/appledict/jing/__init__.py |
"""
checking XML files with Apple Dictionary Schema.
This module can be run from the command line with a single argument -- the
file to be checked. Otherwise, you need to import this module and call the
`run` function with the filename as its only argument.
"""
__all__ = ["JingTestError", "run"]
from .main import JingTestError, run
| 322 | Python | .py | 8 | 38.875 | 70 | 0.765273 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,872 | main.py | ilius_pyglossary/pyglossary/plugins/appledict/jing/main.py |
from __future__ import annotations
"""Jing, a validator for RELAX NG and other schema languages."""
import logging
import subprocess
import sys
from os import path
__all__ = ["JingTestError", "main", "run"]
log = logging.getLogger("pyglossary")
log.setLevel(logging.DEBUG)
class JingTestError(subprocess.CalledProcessError):
"""
An exception that is raised when the jing test fails, i.e. returns non-zero.
the exit status will be stored in the `returncode` attribute.
the `output` attribute also will store the output.
"""
def __init__(
self,
returncode: int,
cmd: list[str],
output: bytes,
) -> None:
super().__init__(returncode, cmd, output)
def __str__(self) -> str:
return "\n".join(
[
f"Jing check failed with exit code {self.returncode}:",
"-" * 80,
self.output.decode("utf-8", errors="replace"),
],
)
def run(filename: str) -> None:
"""
Check whether the file named `filename` conforms to
`AppleDictionarySchema.rng`.
:returns: None
:raises: JingTestError
"""
here = path.abspath(path.dirname(__file__))
filename = path.abspath(filename)
jing_jar_path = path.join(here, "jing", "bin", "jing.jar")
rng_path = path.join(here, "DictionarySchema", "AppleDictionarySchema.rng")
# -Xmxn Specifies the maximum size, in bytes, of the memory allocation pool
# -- from `man 1 java`
cmd = ["java", "-Xmx2G", "-jar", jing_jar_path, rng_path, filename]
log.info("running Jing check:")
log.info(f"{cmd}")
log.info("...")
pipe = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
returncode = pipe.wait()
output = pipe.communicate()[0]
if returncode != 0:
if returncode < 0:
log.error(f"Jing was terminated by signal {-returncode}")
elif returncode > 0:
log.error(f"Jing returned {returncode}")
raise JingTestError(returncode, cmd, output)
log.info("Jing check successfully passed!")
def main() -> None:
"""
Run Jing test on given dictionary XML file with Apple Dictionary Schema.
It's a command-line utility.
"""
if len(sys.argv) < 2:
prog_name = path.basename(sys.argv[0])
log.info(f"usage:\n {prog_name} filename")
sys.exit(1)
try:
run(sys.argv[1])
except JingTestError as e:
log.fatal(str(e))
sys.exit(e.returncode)
| 2,219 | Python | .py | 75 | 27 | 76 | 0.707156 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,873 | ru.py | ilius_pyglossary/pyglossary/plugins/appledict/indexes/ru.py |
# -*- coding: utf-8 -*-
# appledict/indexes/ru.py
#
# Copyright © 2016 ivan tkachenko me@ratijas.tk
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""Russian indexes based on pymorphy."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Sequence
from pyglossary.core import log, pip
from . import languages
try:
import pymorphy2 # type: ignore
except ImportError:
log.error(
f"""module pymorphy2 is required to build extended Russian indexes.
You can download it here: http://pymorphy2.readthedocs.org/en/latest/.
Or by running: {pip} install pymorphy2""",
)
raise
morphy = pymorphy2.MorphAnalyzer()
def ru(titles: Sequence[str], _: str) -> set[str]:
"""
Return a set of all declensions, cases and other forms of the word `title`.
Note that it works only if the title is a single word.
"""
indexes: set[str] = set()
indexes_norm: set[str] = set()
for title in titles:
# in-place modification
_ru(title, indexes, indexes_norm)
return indexes
def _ru(title: str, a: set[str], a_norm: "set[str]") -> None:
# uppercase abbreviation
if title.isupper():
return
title_norm = normalize(title)
# feature: put dot at the end to match only this word
a.add(title)
a.add(title + ".")
a_norm.add(title_norm)
# decline only one-word titles
if len(title.split()) == 1:
normal_forms = morphy.parse(title)
if len(normal_forms) > 0:
# forms of most probable match
normal_form = normal_forms[0]
for x in normal_form.lexeme:
word = x.word
# Apple Dictionary Services see no difference between
# "й" and "и", "ё" and "е", so we're trying to avoid
# "* Duplicate index. Skipped..." warning.
# new: return indexes with original letters but check for
# occurrence against "normal forms".
word_norm = normalize(word)
if word_norm not in a_norm:
a.add(word)
a_norm.add(word_norm)
def normalize(word: str) -> str:
return word.lower().replace("й", "и").replace("ё", "е").replace("-", " ")
languages["ru"] = ru
| 2,585 | Python | .py | 73 | 32.835616 | 74 | 0.726138 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,874 | zh.py | ilius_pyglossary/pyglossary/plugins/appledict/indexes/zh.py |
# -*- coding: utf-8 -*-
# appledict/indexes/zh.py
#
# Copyright © 2016 ivan tkachenko me@ratijas.tk
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
"""Chinese wildcard and pinyin indexes."""
import re
from collections.abc import Sequence
import bs4
from pyglossary.core import log, pip
try:
import colorize_pinyin as color # type: ignore
except ImportError:
log.error(
"module colorize_pinyin is required to build extended Chinese"
" indexes. You can install it by running: "
f"{pip} install colorize-pinyin",
)
raise
from . import languages, log
pinyinPattern = re.compile(r",|;")
nonHieroglyphPattern = re.compile(r"[^\u4e00-\u9fff]")
def zh(titles: Sequence[str], content: str) -> set[str]:
"""
Chinese indexes.
Assuming that content is HTML and pinyin is inside the second tag
(the first is <h1>), we can try to parse the pinyin and generate indexes
with pinyin subwords separated by whitespace:
- the pinyin itself
- the pinyin with diacritics replaced by tone numbers
Multiple pronunciations separated by a comma or semicolon are supported.
"""
indexes = set()
for title in titles:
# feature: put dot at the end to match only this word
indexes.update({title, title + "。"})
# remove all non hieroglyph
indexes.add(nonHieroglyphPattern.sub("", title))
indexes.update(pinyin_indexes(content))
return indexes
def pinyin_indexes(content: str) -> set[str]:
pinyin = find_pinyin(content)
# assert type(pinyin) == unicode
if not pinyin or pinyin == "_":
return set()
indexes = set()
# multiple pronunciations
for pinyinPart in pinyinPattern.split(pinyin):
# find all pinyin ranges, use them to rip pinyin out
py = [
r._slice(pinyinPart) for r in color.ranges_of_pinyin_in_string(pinyinPart)
]
# maybe no pinyin here
if not py:
return set()
# just pinyin, with diacritics, separated by whitespace
indexes.add(color.utf(" ".join(py)) + ".")
# pinyin with diacritics replaced by tone numbers
indexes.add(
color.utf(
" ".join(
[
color.lowercase_string_by_removing_pinyin_tones(p)
+ str(color.determine_tone(p))
for p in py
],
),
)
+ ".",
)
return indexes
def find_pinyin(content: str) -> str | None:
# assume that content is HTML and pinyin is inside second tag
# (first is <h1>)
soup = bs4.BeautifulSoup(content.splitlines()[0], features="lxml")
if soup.body:
soup = soup.body # type: ignore # noqa: PGH003
children = soup.children
try:
next(children) # type: ignore # noqa: PGH003
pinyin = next(children) # type: ignore # noqa: PGH003
except StopIteration:
return None
return pinyin.text
languages["zh"] = zh
| 3,219 | Python | .py | 97 | 30.56701 | 77 | 0.73771 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,875 | __init__.py | ilius_pyglossary/pyglossary/plugins/appledict/indexes/__init__.py |
# -*- coding: utf-8 -*-
# appledict/indexes/__init__.py
#
# Copyright © 2016 ivan tkachenko me@ratijas.tk
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# You can get a copy of GNU General Public License along this program
# But you can always get it from http://www.gnu.org/licenses/gpl.txt
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import annotations
"""extended indexes generation with respect to source language."""
import os
import pkgutil
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Callable, Sequence
from pyglossary.core import log
__all__ = ["languages", "log"]
languages: dict[str, Callable[[Sequence[str], str], set[str]]] = {}
"""
submodules must register languages by adding (language name -> function)
pairs to the mapping.
function must follow signature below:
:param titles: flat iterable of title and alternative titles
:param content: cleaned entry content
:return: iterable of indexes (str).
use
```
from . import languages
# or
from appledict.indexes import languages
```
"""
here = os.path.dirname(os.path.abspath(__file__))
for _, module, _ in pkgutil.iter_modules([here]): # type: ignore # noqa: PGH003
__import__(f"{__name__}.{module}")
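# A minimal registration sketch following the contract documented above
# (the language code "xx" and the function body are hypothetical; real
# registrations live in the submodules, e.g. ru.py and zh.py):
#
# def xx(titles: Sequence[str], content: str) -> set[str]:
# 	return {t.lower() for t in titles}
#
# languages["xx"] = xx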
| 1,592 | Python | .py | 43 | 35.604651 | 80 | 0.760884 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,876 | bgl_text.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_text.py |
# -*- coding: utf-8 -*-
#
# Copyright © 2008-2021 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2011-2012 kubtek <kubtek@gmail.com>
# This file is part of PyGlossary project, http://github.com/ilius/pyglossary
# Thanks to Raul Fernandes <rgfbr@yahoo.com.br> and Karl Grill for reverse
# engineering as part of https://sourceforge.net/projects/ktranslator/
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
import re
from pyglossary import core
from pyglossary.core import log
from pyglossary.xml_utils import xml_escape
__all__ = [
"fixImgLinks",
"normalizeNewlines",
"removeControlChars",
"removeNewlines",
"replaceAsciiCharRefs",
"replaceHtmlEntries",
"replaceHtmlEntriesInKeys",
"stripDollarIndexes",
"stripHtmlTags",
"unknownHtmlEntries",
]
u_pat_html_entry = re.compile("(?:&#x|&#|&)(\\w+);?", re.IGNORECASE)
u_pat_html_entry_key = re.compile("(?:&#x|&#|&)(\\w+);", re.IGNORECASE)
b_pat_ascii_char_ref = re.compile(b"(&#\\w+;)", re.IGNORECASE)
u_pat_newline_escape = re.compile("[\\r\\n\\\\]")
u_pat_strip_tags = re.compile("(?:<[/a-zA-Z].*?(?:>|$))+")
u_pat_control_chars = re.compile("[\x00-\x08\x0c\x0e-\x1f]")
u_pat_newline = re.compile("[\r\n]+")
unknownHtmlEntries = set()
def replaceHtmlEntryNoEscapeCB(u_match: "re.Match") -> str:
"""
u_match: instance of _sre.SRE_Match
Replace character entity with the corresponding character.
Return the original string if conversion fails.
Use this as a replace function of re.sub.
"""
from pyglossary.html_utils import name2codepoint
u_text = u_match.group(0)
u_name = u_match.group(1)
if core.isDebug():
assert isinstance(u_text, str)
assert isinstance(u_name, str)
if u_text[:2] == "&#":
# character reference
try:
code = int(u_name, 16) if u_text[:3].lower() == "&#x" else int(u_name)
if code <= 0:
raise ValueError(f"{code = }")
return chr(code)
except (ValueError, OverflowError):
return chr(0xFFFD) # replacement character
elif u_text[0] == "&":
"""
Babylon dictionaries contain a lot of non-standard entity
references, for example csdot, fllig, nsm, cancer, thlig,
tsdot, upslur...
This is not just a typo. These entries repeat over and over again.
Perhaps they had meaning in the source dictionary that was
converted to Babylon, but now the meaning is lost. Babylon
renders them as is, that is, for example, &csdot; while
other references like &amp; are replaced with the corresponding
characters.
"""
# named entity
try:
return chr(name2codepoint[u_name.lower()])
except KeyError:
unknownHtmlEntries.add(u_text)
return u_text
raise ValueError(f"{u_text[0] =}")
def replaceHtmlEntryCB(u_match: "re.Match") -> str:
"""
u_match: instance of _sre.SRE_Match
Same as replaceHtmlEntryNoEscapeCB, but escapes result string.
Only <, >, & characters are escaped.
"""
u_res = replaceHtmlEntryNoEscapeCB(u_match)
if u_match.group(0) == u_res: # conversion failed
return u_res
# FIXME: should " and ' be escaped?
return xml_escape(u_res, quotation=False)
# def replaceDingbat(u_match: "re.Match") -> str:
# r"""Replace chars \\u008c-\\u0095 with \\u2776-\\u277f."""
# ch = u_match.group(0)
# code = ch + 0x2776 - 0x8C
# return chr(code)
def escapeNewlinesCallback(u_match: "re.Match") -> str:
"""u_match: instance of _sre.SRE_Match."""
ch = u_match.group(0)
if ch == "\n":
return "\\n"
if ch == "\r":
return "\\r"
if ch == "\\":
return "\\\\"
return ch
def replaceHtmlEntries(u_text: str) -> str:
# &ldash;
# &#0147;
# &#x010b;
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_html_entry.sub(
replaceHtmlEntryCB,
u_text,
)
def replaceHtmlEntriesInKeys(u_text: str) -> str:
# &ldash;
# &#0147;
# &#x010b;
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_html_entry_key.sub(
replaceHtmlEntryNoEscapeCB,
u_text,
)
def escapeNewlines(u_text: str) -> str:
r"""
Convert text to c-escaped string:
\ -> \\
new line -> \n or \r.
"""
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_newline_escape.sub(
escapeNewlinesCallback,
u_text,
)
def stripHtmlTags(u_text: str) -> str:
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_strip_tags.sub(
" ",
u_text,
)
def removeControlChars(u_text: str) -> str:
# \x09 - tab
# \x0a - line feed
# \x0b - vertical tab
# \x0d - carriage return
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_control_chars.sub(
"",
u_text,
)
def removeNewlines(u_text: str) -> str:
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_newline.sub(
" ",
u_text,
)
def normalizeNewlines(u_text: str) -> str:
"""Convert new lines to unix style and remove consecutive new lines."""
if core.isDebug():
assert isinstance(u_text, str)
return u_pat_newline.sub(
"\n",
u_text,
)
def replaceAsciiCharRefs(b_text: bytes) -> bytes:
# &#0147;
# &#x010b;
if core.isDebug():
assert isinstance(b_text, bytes)
b_parts = b_pat_ascii_char_ref.split(b_text)
for i_part, b_part in enumerate(b_parts):
if i_part % 2 != 1:
continue
# reference
try:
code = (
int(b_part[3:-1], 16)
if b_part[:3].lower() == "&#x"
else int(b_part[2:-1])
)
if code <= 0:
raise ValueError(f"{code = }")
except (ValueError, OverflowError):
code = -1
if code < 128 or code > 255:
continue
# no need to escape "<", ">", "&"
b_parts[i_part] = bytes([code])
return b"".join(b_parts)
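# For example (only codes in the 128..255 range are converted, as above):
#   replaceAsciiCharRefs(b"caf&#233;") -> b"caf\xe9"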
def fixImgLinks(u_text: str) -> str:
r"""
Fix img tag links.
src attribute value of image tag is often enclosed in \x1e - \x1f
characters.
For example:
<IMG border='0' src='\x1e6B6C56EC.png\x1f' width='9' height='8'>.
Naturally the control characters are not part of the image source name.
They may be used to quickly find all names of resources.
This function strips all such characters.
Control characters \x1e and \x1f are useless in html text, so we may
safely remove all of them, irrespective of context.
"""
if core.isDebug():
assert isinstance(u_text, str)
return u_text.replace("\x1e", "").replace("\x1f", "")
def stripDollarIndexes(b_word: bytes) -> tuple[bytes, int]:
if core.isDebug():
assert isinstance(b_word, bytes)
i = 0
b_word_main = b""
strip_count = 0 # number of sequences found
# strip $<index>$ sequences
while True:
d0 = b_word.find(b"$", i)
if d0 == -1:
b_word_main += b_word[i:]
break
d1 = b_word.find(b"$", d0 + 1)
if d1 == -1:
# log.debug(
# f"stripDollarIndexes({b_word}):\npaired $ is not found",
# )
b_word_main += b_word[i:]
break
# You may find keys (or alternative keys) like these:
# sur l'arbre$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# obscurantiste$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# They all end on a sequence of b'$', key length including dollars
# is always 60 chars.
# You may find keys like these:
# extremidade-$$$-$$$-linha
# .FIRM$$$$$$$$$$$$$
# etc
# summary: we must remove any sequence of dollar signs longer
# than 1 chars
if d1 == d0 + 1:
# log.debug(f"stripDollarIndexes({b_word}):\nfound $$")
b_word_main += b_word[i:d0]
i = d1 + 1
while i < len(b_word) and b_word[i] == ord(b"$"):
i += 1
if i >= len(b_word):
break
continue
if b_word[d0 + 1 : d1].strip(b"0123456789"):
# if has at least one non-digit char
# log.debug(f"stripDollarIndexes({b_word}):\nnon-digit between $$")
b_word_main += b_word[i:d1]
i = d1
continue
# Examples:
# make do$4$/make /do
# potere$1$<BR><BR>
# See also <a href='file://ITAL-ENG POTERE 1249-1250.pdf'>notes...</A>
# volere$1$<BR><BR>
# See also <a href='file://ITAL-ENG VOLERE 1469-1470.pdf'>notes...</A>
# Ihre$1$Ihres
if d1 + 1 < len(b_word) and b_word[d1 + 1] != 0x20:
log.debug(
f"stripDollarIndexes({b_word!r}):\n"
"second $ is followed by non-space",
)
b_word_main += b_word[i:d0]
i = d1 + 1
strip_count += 1
return b_word_main, strip_count
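# For example:
#   stripDollarIndexes(b"make do$4$") -> (b"make do", 1)
#   stripDollarIndexes(b"plain") -> (b"plain", 0)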
| 8,663 | Python | .py | 280 | 28.335714 | 78 | 0.675264 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,877 | bgl_charset.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_charset.py |
# -*- coding: utf-8 -*-
#
# Copyright © 2008-2020 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
__all__ = ["charsetByCode"]
charsetByCode = {
0x41: "cp1252", # Default, 0x41
0x42: "cp1252", # Latin, 0x42
0x43: "cp1250", # Eastern European, 0x43
0x44: "cp1251", # Cyrillic, 0x44
0x45: "cp932", # Japanese, 0x45
0x46: "cp950", # Traditional Chinese, 0x46
0x47: "cp936", # Simplified Chinese, 0x47
0x48: "cp1257", # Baltic, 0x48
0x49: "cp1253", # Greek, 0x49
0x4A: "cp949", # Korean, 0x4A
0x4B: "cp1254", # Turkish, 0x4B
0x4C: "cp1255", # Hebrew, 0x4C
0x4D: "cp1256", # Arabic, 0x4D
0x4E: "cp874", # Thai, 0x4E
}
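# Usage sketch (illustrative bytes): decode text tagged with charset code 0x44
#   b"\xcf\xf0\xe8\xe2\xe5\xf2".decode(charsetByCode[0x44]) -> "Привет"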
| 1,327 | Python | .py | 34 | 37.558824 | 78 | 0.717273 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,878 | bgl_reader.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_reader.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
#
# Copyright © 2008-2021 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2011-2012 kubtek <kubtek@gmail.com>
# This file is part of PyGlossary project, http://github.com/ilius/pyglossary
# Thanks to Raul Fernandes <rgfbr@yahoo.com.br> and Karl Grill for reverse
# engineering as part of https://sourceforge.net/projects/ktranslator/
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
import io
import os
import re
from collections import OrderedDict as odict
from collections.abc import Iterator
from typing import TYPE_CHECKING, NamedTuple
from pyglossary.core import log
from pyglossary.option import (
BoolOption,
EncodingOption,
HtmlColorOption,
Option,
StrOption,
)
from pyglossary.text_utils import (
excMessage,
uintFromBytes,
)
from pyglossary.xml_utils import xml_escape
from .bgl_gzip import GzipFile
from .bgl_info import (
charsetInfoDecode,
infoType3ByCode,
)
from .bgl_pos import partOfSpeechByCode
from .bgl_text import (
fixImgLinks,
normalizeNewlines,
removeControlChars,
removeNewlines,
replaceAsciiCharRefs,
replaceHtmlEntries,
replaceHtmlEntriesInKeys,
stripDollarIndexes,
stripHtmlTags,
unknownHtmlEntries,
)
if TYPE_CHECKING:
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = ["BGLGzipFile", "BglReader", "Block", "FileOffS", "optionsProp", "tmpDir"]
file = io.BufferedReader
debugReadOptions = {
"search_char_samples", # bool
"collect_metadata2", # bool
"write_gz", # bool
"char_samples_path", # str, file path
"msg_log_path", # str, file path
"raw_dump_path", # str, file path
"unpacked_gzip_path", # str, file path
}
optionsProp: "dict[str, Option]" = {
"default_encoding_overwrite": EncodingOption(
comment="Default encoding (overwrite)",
),
"source_encoding_overwrite": EncodingOption(
comment="Source encoding (overwrite)",
),
"target_encoding_overwrite": EncodingOption(
comment="Target encoding (overwrite)",
),
"part_of_speech_color": HtmlColorOption(
comment="Color for Part of Speech",
),
"no_control_sequence_in_defi": BoolOption(
comment="No control sequence in definitions",
),
"strict_string_conversion": BoolOption(
comment="Strict string conversion",
),
"process_html_in_key": BoolOption(
comment="Process HTML in (entry or info) key",
),
"key_rstrip_chars": StrOption(
multiline=True,
comment="Characters to strip from right-side of keys",
),
# debug read options:
"search_char_samples": BoolOption(
comment="(debug) Search character samples",
),
"collect_metadata2": BoolOption(
comment="(debug) Collect second pass metadata from definitions",
),
"write_gz": BoolOption(
comment="(debug) Create a file named *-data.gz",
),
"char_samples_path": StrOption(
# file path
comment="(debug) File path for character samples",
),
"msg_log_path": StrOption(
# file path
comment="(debug) File path for message log",
),
"raw_dump_path": StrOption(
# file path
comment="(debug) File path for writing raw blocks",
),
"unpacked_gzip_path": StrOption(
# file path
comment="(debug) Path to create unzipped file",
),
}
if os.sep == "/": # Operating system is Unix-like
tmpDir = "/tmp" # noqa: S108
elif os.sep == "\\": # Operating system is ms-windows
tmpDir = os.getenv("TEMP")
else:
raise RuntimeError(
f"Unknown path separator(os.sep=={os.sep!r}). What is your operating system?",
)
re_charset_decode = re.compile(
b'(<charset\\s+c\\=[\'"]?(\\w)[\'"]?>|</charset>)',
re.IGNORECASE,
)
re_b_reference = re.compile(b"^[0-9a-fA-F]{4}$")
class EntryWordData(NamedTuple):
pos: int
b_word: bytes
u_word: str
u_word_html: str
class BGLGzipFile(GzipFile):
"""
gzip_no_crc.py contains GzipFile class without CRC check.
It prints a warning when CRC code does not match.
The original method raises an exception in this case.
Some dictionaries do not use CRC code, it is set to 0.
"""
def __init__(
self,
fileobj: "io.IOBase | None" = None,
closeFileobj: bool = False,
**kwargs,
) -> None:
GzipFile.__init__(self, fileobj=fileobj, **kwargs)
self.closeFileobj = closeFileobj
def close(self) -> None:
if self.closeFileobj:
self.fileobj.close()
class Block:
def __init__(self) -> None:
self.data = b""
self.type = ""
# block offset in the gzip stream, for debugging
self.offset = -1
def __str__(self) -> str:
return (
f"Block type={self.type}, length={self.length}, "
f"len(data)={len(self.data)}"
)
class FileOffS(file):
"""
A file class with an offset.
This class provides an interface to a part of a file, starting at a
specified offset and ending at the end of the file, making it appear to be
an independent file. The offset parameter of the constructor specifies the
offset of the first byte of the modeled file.
"""
def __init__(self, filename: str, offset: int = 0) -> None:
fileObj = open(filename, "rb") # noqa: SIM115
file.__init__(self, fileObj)
self._fileObj = fileObj
self.offset = offset
file.seek(self, offset) # OR self.seek(0)
def close(self) -> None:
self._fileObj.close()
def seek(self, pos: int, whence: int = 0) -> None:
if whence == 0: # relative to start of file
file.seek(
self,
max(0, pos) + self.offset,
0,
)
elif whence == 1: # relative to current position
file.seek(
self,
max(
self.offset,
self.tell() + pos,
),
0,
)
elif whence == 2: # relative to end of file
file.seek(self, pos, 2)
else:
raise ValueError(f"FileOffS.seek: bad whence={whence}")
def tell(self) -> int:
return file.tell(self) - self.offset
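# Usage sketch (mirrors how BglReader.openGzip below constructs it; the
# file name and offset are illustrative):
#
# f = FileOffS("some.bgl", offset=130)
# f.read(2)   # first two bytes of the embedded gzip stream (b"\x1f\x8b")
# f.seek(0)   # rewinds to the gzip header, not to the start of the .bgl file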
class DefinitionFields:
"""
Fields of entry definition.
Entry definition consists of a number of fields.
The most important of them are:
defi - the main definition, mandatory, comes first.
part of speech
title
"""
# nameByCode = {
# }
def __init__(self) -> None:
# self.bytesByCode = {}
# self.strByCode = {}
self.encoding = None # encoding of the definition
self.singleEncoding = True
# singleEncoding=True if the definition was encoded with
# a single encoding
self.b_defi = None # bytes, main definition part of defi
self.u_defi = None # str, main part of definition
self.partOfSpeech = None
# string representation of the part of speech, utf-8
self.b_title = None # bytes
self.u_title = None # str
self.b_title_trans = None # bytes
self.u_title_trans = None # str
self.b_transcription_50 = None # bytes
self.u_transcription_50 = None # str
self.code_transcription_50 = None
self.b_transcription_60 = None # bytes
self.u_transcription_60 = None # str
self.code_transcription_60 = None
self.b_field_1a = None # bytes
self.u_field_1a = None # str
self.b_field_07 = None # bytes
self.b_field_06 = None # bytes
self.b_field_13 = None # bytes
class BglReader:
_default_encoding_overwrite: str = ""
_source_encoding_overwrite: str = ""
_target_encoding_overwrite: str = ""
_part_of_speech_color: str = "007000"
_no_control_sequence_in_defi: bool = False
_strict_string_conversion: bool = False
# process keys and alternates as HTML
# Babylon does not interpret keys and alternates as HTML text,
# however you may encounter many keys containing character references
# and html tags. That is clearly a bug of the dictionary.
# We must be very careful processing HTML tags in keys, not damage
# normal keys. This option should be disabled by default, enabled
# explicitly by user. Namely this option does the following:
# - resolve character references
# - strip HTML tags
_process_html_in_key: bool = True
# a string of characters that will be stripped from the end of the
# key (and alternate), see str.rstrip function
_key_rstrip_chars: str = ""
##########################################################################
"""
Dictionary properties
---------------------
Dictionary (or glossary) properties are textual data like glossary name,
glossary author name, glossary author e-mail, copyright message and
glossary description. Most of the dictionaries have these properties set.
Since they contain textual data we need to know the encoding.
There may be other properties not listed here. I've enumerated only those
that are available in Babylon Glossary builder.
Playing with Babylon builder allows us detect how encoding is selected.
If global utf-8 flag is set, utf-8 encoding is used for all properties.
Otherwise the target encoding is used, that is the encoding corresponding
to the target language. The chars that cannot be represented in the target
encoding are replaced with question marks.
Using this algorithm to decode dictionary properties, you may find that
some of them are decoded incorrectly. For example, it is clear that the
property is in cp1251 encoding while the algorithm says we must use cp1252,
and we get garbage after decoding. That is OK, the algorithm is correct.
You may install that dictionary in Babylon and check dictionary properties.
It shows the same garbage. Unfortunately, we cannot detect correct encoding
in this case automatically. We may add a parameter that will overwrite the
selected encoding, so the user may fix the encoding if needed.
"""
def __init__(self, glos: GlossaryType) -> None: # no more arguments
self._glos = glos
self._filename = ""
self.info = odict()
self.numEntries = None
####
self.sourceLang = ""
self.targetLang = ""
##
self.defaultCharset = ""
self.sourceCharset = ""
self.targetCharset = ""
##
self.sourceEncoding = None
self.targetEncoding = None
####
self.bgl_numEntries = None
self.wordLenMax = 0
self.defiMaxBytes = 0
##
self.metadata2 = None
self.rawDumpFile = None
self.msgLogFile = None
self.samplesDumpFile = None
##
self.stripSlashAltKeyPattern = re.compile(r"(^|\s)/(\w)", re.UNICODE)
self.specialCharPattern = re.compile(r"[^\s\w.]", re.UNICODE)
###
self.file = None
# offset of gzip header, set in self.open()
self.gzipOffset = None
		# the _part_of_speech_color option must be in RRGGBB format
self.iconDataList = []
self.aboutBytes: "bytes | None" = None
self.aboutExt = ""
def __len__(self) -> int:
if self.numEntries is None:
log.warning("len(reader) called while numEntries=None")
return 0
return self.numEntries + self.numResources
# open .bgl file, read signature, find and open gzipped content
# self.file - ungzipped content
def open(
self,
filename: str,
) -> None:
self._filename = filename
if not self.openGzip():
raise OSError("BGL: failed to read gzip header")
self.readInfo()
self.setGlossaryInfo()
	def openGzip(self) -> bool:
with open(self._filename, "rb") as bglFile:
if not bglFile:
log.error(f"file pointer empty: {bglFile}")
return False
b_head = bglFile.read(6)
if len(b_head) < 6 or b_head[:4] not in {
b"\x12\x34\x00\x01",
b"\x12\x34\x00\x02",
}:
log.error(f"invalid header: {b_head[:6]!r}")
return False
self.gzipOffset = gzipOffset = uintFromBytes(b_head[4:6])
log.debug(f"Position of gz header: {gzipOffset}")
if gzipOffset < 6:
log.error(f"invalid gzip header position: {gzipOffset}")
return False
self.file = BGLGzipFile(
fileobj=FileOffS(self._filename, gzipOffset),
closeFileobj=True,
)
return True
# TODO: PLR0912 Too many branches (14 > 12)
def readInfo(self) -> None: # noqa: PLR0912
"""
Read meta information about the dictionary: author, description,
source and target languages, etc (articles are not read).
"""
self.numEntries = 0
self.numBlocks = 0
self.numResources = 0
block = Block()
while not self.isEndOfDictData():
if not self.readBlock(block):
break
self.numBlocks += 1
if not block.data:
continue
if block.type == 0:
self.readType0(block)
elif block.type in {1, 7, 10, 11, 13}:
self.numEntries += 1
elif block.type == 2:
self.numResources += 1
elif block.type == 3:
self.readType3(block)
else: # Unknown block.type
log.debug(
f"Unknown Block type {block.type!r}"
f", data_length = {len(block.data)}"
f", number = {self.numBlocks}",
)
self.file.seek(0)
self.detectEncoding()
log.debug(f"numEntries = {self.numEntries}")
if self.bgl_numEntries and self.bgl_numEntries != self.numEntries:
# There are a number of cases when these numbers do not match.
			# The dictionary is OK, and it does not necessarily mean that we
			# missed an entry.
# self.bgl_numEntries may be less than the number of entries
# we've read.
log.warning(
f"bgl_numEntries={self.bgl_numEntries}"
f", numEntries={self.numEntries}",
)
self.numBlocks = 0
encoding = self.targetEncoding # FIXME: confirm this is correct
for key, value in self.info.items():
if isinstance(value, bytes):
try:
value = value.decode(encoding) # noqa: PLW2901
except Exception:
log.warning(f"failed to decode info value: {key} = {value}")
else:
self.info[key] = value
def setGlossaryInfo(self) -> None:
glos = self._glos
###
if self.sourceLang:
glos.sourceLangName = self.sourceLang.name
if self.sourceLang.name2:
glos.setInfo("sourceLang2", self.sourceLang.name2)
if self.targetLang:
glos.targetLangName = self.targetLang.name
if self.targetLang.name2:
glos.setInfo("targetLang2", self.targetLang.name2)
###
for attr in (
"defaultCharset",
"sourceCharset",
"targetCharset",
"defaultEncoding",
"sourceEncoding",
"targetEncoding",
):
value = getattr(self, attr, None)
if value:
glos.setInfo("bgl_" + attr, value)
###
glos.setInfo("sourceCharset", "UTF-8")
glos.setInfo("targetCharset", "UTF-8")
###
if "lastUpdated" not in self.info and "bgl_firstUpdated" in self.info:
log.debug("replacing bgl_firstUpdated with lastUpdated")
self.info["lastUpdated"] = self.info.pop("bgl_firstUpdated")
###
for key, value in self.info.items():
s_value = str(value).strip("\x00")
if not s_value:
continue
# TODO: a bool flag to add empty value infos?
# leave "creationTime" and "lastUpdated" as is
if key == "utf8Encoding":
key = "bgl_" + key # noqa: PLW2901
try:
glos.setInfo(key, s_value)
except Exception:
log.exception(f"key = {key}")
def isEndOfDictData(self) -> bool: # noqa: PLR6301
"""
Test for end of dictionary data.
A bgl file stores dictionary data as a gzip compressed block.
In other words, a bgl file stores a gzip data file inside.
A gzip file consists of a series of "members".
gzip data block in bgl consists of one member (I guess).
Testing for block type returned by self.readBlock is not a
reliable way to detect the end of gzip member.
For example, consider "Airport Code Dictionary.BGL" dictionary.
To reliably test for end of gzip member block we must use a number
of undocumented variables of gzip.GzipFile class.
self.file._new_member - true if the current member has been
completely read from the input file
self.file.extrasize - size of buffered data
self.file.offset - offset in the input file
after reading one gzip member current position in the input file
is set to the first byte after gzip data
We may get this offset: self.file_bgl.tell()
The last 4 bytes of gzip block contains the size of the original
(uncompressed) input data modulo 2^32
"""
return False
def close(self) -> None:
if self.file:
self.file.close()
self.file = None
def __del__(self) -> None:
self.close()
while unknownHtmlEntries:
entity = unknownHtmlEntries.pop()
log.debug(f"BGL: unknown html entity: {entity}")
# returns False if error
def readBlock(self, block: Block) -> bool:
block.offset = self.file.tell()
length = self.readBytes(1)
if length == -1:
log.debug("readBlock: length = -1")
return False
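		# The header byte packs the block type in the low nibble and a length
		# code in the high nibble. For example, a hypothetical header byte
		# 0x62 means type=2 with an inline length of 0x6 - 4 = 2 bytes, while
		# 0x13 means type=3 and that the next 0x1 + 1 = 2 bytes hold the
		# actual block length.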
block.type = length & 0xF
length >>= 4
if length < 4:
length = self.readBytes(length + 1)
if length == -1:
log.error("readBlock: length = -1")
return False
else:
length -= 4
self.file.flush()
if length > 0:
try:
block.data = self.file.read(length)
except Exception:
# struct.error: unpack requires a string argument of length 4
# FIXME
log.exception(
"failed to read block data"
f": numBlocks={self.numBlocks}"
f", length={length}"
f", filePos={self.file.tell()}",
)
block.data = b""
return False
else:
block.data = b""
return True
def readBytes(self, num: int) -> int:
"""Return -1 if error."""
if num < 1 or num > 4:
log.error(f"invalid argument num={num}")
return -1
self.file.flush()
buf = self.file.read(num)
if len(buf) == 0:
log.debug("readBytes: end of file: len(buf)==0")
return -1
if len(buf) != num:
log.error(
f"readBytes: expected to read {num} bytes"
f", but found {len(buf)} bytes",
)
return -1
return uintFromBytes(buf)
def readType0(self, block: Block) -> bool:
code = block.data[0]
if code == 2:
			# this number is very close to self.bgl_numEntries,
			# but is not always equal to the number of entries
# see self.readType3, code == 12 as well
# num = uintFromBytes(block.data[1:])
pass
elif code == 8:
self.defaultCharset = charsetInfoDecode(block.data[1:])
if not self.defaultCharset:
log.warning("defaultCharset is not valid")
else:
self.logUnknownBlock(block)
return False
return True
def readType2(self, block: Block) -> EntryType | None:
"""
Process type 2 block.
Type 2 block is an embedded file (mostly Image or HTML).
pass_num - pass number, may be 1 or 2
On the first pass self.sourceEncoding is not defined and we cannot
decode file names.
That is why the second pass is needed. The second pass is costly, it
apparently increases total processing time. We should avoid the second
pass if possible.
Most of the dictionaries do not have valuable resources, and those
that do, use file names consisting only of ASCII characters. We may
process these resources on the second pass. If all files have been
processed on the first pass, the second pass is not needed.
All dictionaries I've processed so far use only ASCII chars in
file names.
Babylon glossary builder replaces names of files, like links to images,
with what looks like a hash code of the file name,
for example "8FFC5C68.png".
returns: DataEntry instance if the resource was successfully processed
and None if failed
"""
# Embedded File (mostly Image or HTML)
pos = 0
# name:
Len = block.data[pos]
pos += 1
if pos + Len > len(block.data):
log.warning("reading block type 2: name too long")
return None
b_name = block.data[pos : pos + Len]
pos += Len
b_data = block.data[pos:]
# if b_name in (b"C2EEF3F6.html", b"8EAF66FD.bmp"):
# log.debug(f"Skipping useless file {b_name!r}")
# return
u_name = b_name.decode(self.sourceEncoding)
return self._glos.newDataEntry(
u_name,
b_data,
)
def readType3(self, block: Block) -> None:
"""
Reads block with type 3, and updates self.info
returns None.
"""
code, b_value = uintFromBytes(block.data[:2]), block.data[2:]
if not b_value:
return
# if not b_value.strip(b"\x00"): return # FIXME
try:
item = infoType3ByCode[code]
except KeyError:
if b_value.strip(b"\x00"):
log.debug(
f"Unknown info type code={code:#02x}, b_value={b_value!r}",
)
return
key = item.name
decode = item.decode
if key.endswith(".ico"):
self.iconDataList.append((key, b_value))
return
value = b_value if decode is None else decode(b_value)
# `value` can be None, str, bytes or dict
if not value:
return
if key == "bgl_about":
self.aboutBytes = value["about"]
self.aboutExt = value["about_extension"]
return
if isinstance(value, dict):
self.info.update(value)
return
if item.attr:
setattr(self, key, value)
return
self.info[key] = value
def detectEncoding(self) -> None: # noqa: PLR0912
"""Assign self.sourceEncoding and self.targetEncoding."""
utf8Encoding = self.info.get("utf8Encoding", False)
if self._default_encoding_overwrite:
self.defaultEncoding = self._default_encoding_overwrite
elif self.defaultCharset:
self.defaultEncoding = self.defaultCharset
else:
self.defaultEncoding = "cp1252"
if self._source_encoding_overwrite:
self.sourceEncoding = self._source_encoding_overwrite
elif utf8Encoding:
self.sourceEncoding = "utf-8"
elif self.sourceCharset:
self.sourceEncoding = self.sourceCharset
elif self.sourceLang:
self.sourceEncoding = self.sourceLang.encoding
else:
self.sourceEncoding = self.defaultEncoding
if self._target_encoding_overwrite:
self.targetEncoding = self._target_encoding_overwrite
elif utf8Encoding:
self.targetEncoding = "utf-8"
elif self.targetCharset:
self.targetEncoding = self.targetCharset
elif self.targetLang:
self.targetEncoding = self.targetLang.encoding
else:
self.targetEncoding = self.defaultEncoding
def logUnknownBlock(self, block: Block) -> None:
log.debug(
f"Unknown block: type={block.type}"
f", number={self.numBlocks}"
f", data={block.data!r}",
)
def __iter__(self) -> Iterator[EntryType]: # noqa: PLR0912
if not self.file:
raise RuntimeError("iterating over a reader while it's not open")
for fname, iconData in self.iconDataList:
yield self._glos.newDataEntry(fname, iconData)
if self.aboutBytes:
yield self._glos.newDataEntry(
"about" + self.aboutExt,
self.aboutBytes,
)
block = Block()
while not self.isEndOfDictData():
if not self.readBlock(block):
break
if not block.data:
continue
if block.type == 2:
yield self.readType2(block)
elif block.type == 11:
succeed, _u_word, u_alts, u_defi = self.readEntry_Type11(block)
if not succeed:
continue
yield self._glos.newEntry(
[_u_word] + u_alts,
u_defi,
)
elif block.type in {1, 7, 10, 11, 13}:
pos = 0
# word:
wordData = self.readEntryWord(block, pos)
if not wordData:
continue
pos = wordData.pos
# defi:
succeed, pos, u_defi, _b_defi = self.readEntryDefi(
block,
pos,
wordData,
)
if not succeed:
continue
# now pos points to the first char after definition
succeed, pos, u_alts = self.readEntryAlts(
block,
pos,
wordData,
)
if not succeed:
continue
yield self._glos.newEntry(
[wordData.u_word] + u_alts,
u_defi,
)
def readEntryWord(
self,
block: Block,
pos: int,
) -> EntryWordData | None:
"""
Read word part of entry.
Return None on error
"""
if pos + 1 > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
", reading word size: pos + 1 > len(block.data)",
)
return None
Len = block.data[pos]
pos += 1
if pos + Len > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
f", block.type={block.type}"
", reading word: pos + Len > len(block.data)",
)
return None
b_word = block.data[pos : pos + Len]
u_word, u_word_html = self.processKey(b_word)
"""
Entry keys may contain html text, for example:
ante<font face'Lucida Sans Unicode'>< meridiem
arm und reich c=t>2003;</charset>
</font>und<font face='Lucida Sans Unicode'>
etc.
		Babylon does not process keys as html; it displays them as is.
		Html in keys is a problem of that particular dictionary.
		We should not process keys as html, since Babylon does not process
		them as such.
"""
pos += Len
self.wordLenMax = max(self.wordLenMax, len(u_word))
return EntryWordData(
pos=pos,
u_word=u_word.strip(),
b_word=b_word.strip(),
u_word_html=u_word_html,
)
def readEntryDefi(
self,
block: Block,
pos: int,
word: EntryWordData,
) -> tuple[bool, int | None, bytes | None, bytes | None]:
"""
Read defi part of entry.
Return value is a list.
(False, None, None, None) if error
(True, pos, u_defi, b_defi) if OK
u_defi is a str instance (utf-8)
b_defi is a bytes instance
"""
Err = (False, None, None, None)
if pos + 2 > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
", reading defi size: pos + 2 > len(block.data)",
)
return Err
Len = uintFromBytes(block.data[pos : pos + 2])
pos += 2
if pos + Len > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
f", block.type={block.type}"
", reading defi: pos + Len > len(block.data)",
)
return Err
b_defi = block.data[pos : pos + Len]
u_defi = self.processDefi(b_defi, word.b_word)
# I was going to add this u_word_html or "formatted headword" to defi,
		# so as not to lose this information, but after looking at the diff
# for 8 such glossaries, I decided it's not useful enough!
# if word.u_word_html:
# u_defi = f"<div><b>{word.u_word_html}</b></div>" + u_defi
self.defiMaxBytes = max(self.defiMaxBytes, len(b_defi))
pos += Len
return True, pos, u_defi, b_defi
def readEntryAlts(
self,
block: Block,
pos: int,
word: EntryWordData,
) -> tuple[bool, int | None, list[str] | None]:
"""
Returns
-------
(False, None, None) if error
(True, pos, u_alts) if succeed
u_alts is a sorted list, items are str (utf-8).
"""
Err = (False, None, None)
# use set instead of list to prevent duplicates
u_alts = set()
while pos < len(block.data):
if pos + 1 > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
", reading alt size: pos + 1 > len(block.data)",
)
return Err
Len = block.data[pos]
pos += 1
if pos + Len > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
f", block.type={block.type}"
", reading alt: pos + Len > len(block.data)",
)
return Err
b_alt = block.data[pos : pos + Len]
u_alt = self.processAlternativeKey(b_alt, word.b_word)
# Like entry key, alt is not processed as html by babylon,
			# so neither do we.
u_alts.add(u_alt)
pos += Len
u_alts.discard(word.u_word)
return True, pos, sorted(u_alts)
def readEntry_Type11(
self,
block: Block,
) -> tuple[bool, str | None, list[str] | None, str | None]:
"""Return (succeed, u_word, u_alts, u_defi)."""
Err = (False, None, None, None)
pos = 0
# reading headword
if pos + 5 > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
", reading word size: pos + 5 > len(block.data)",
)
return Err
wordLen = uintFromBytes(block.data[pos : pos + 5])
pos += 5
if pos + wordLen > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
f", block.type={block.type}"
", reading word: pos + wordLen > len(block.data)",
)
return Err
b_word = block.data[pos : pos + wordLen]
u_word, _u_word_html = self.processKey(b_word)
pos += wordLen
self.wordLenMax = max(self.wordLenMax, len(u_word))
# reading alts and defi
if pos + 4 > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
", reading defi size: pos + 4 > len(block.data)",
)
return Err
altsCount = uintFromBytes(block.data[pos : pos + 4])
pos += 4
# reading alts
# use set instead of list to prevent duplicates
u_alts = set()
for _ in range(altsCount):
if pos + 4 > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
", reading alt size: pos + 4 > len(block.data)",
)
return Err
altLen = uintFromBytes(block.data[pos : pos + 4])
pos += 4
if altLen == 0:
if pos + altLen != len(block.data):
# no evidence
log.warning(
f"reading block offset={block.offset:#02x}"
", reading alt size: pos + altLen != len(block.data)",
)
break
if pos + altLen > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
f", block.type={block.type}"
", reading alt: pos + altLen > len(block.data)",
)
return Err
b_alt = block.data[pos : pos + altLen]
u_alt = self.processAlternativeKey(b_alt, b_word)
# Like entry key, alt is not processed as html by babylon,
			# so neither do we.
u_alts.add(u_alt)
pos += altLen
u_alts.discard(u_word)
# reading defi
defiLen = uintFromBytes(block.data[pos : pos + 4])
pos += 4
if pos + defiLen > len(block.data):
log.error(
f"reading block offset={block.offset:#02x}"
f", block.type={block.type}"
", reading defi: pos + defiLen > len(block.data)",
)
return Err
b_defi = block.data[pos : pos + defiLen]
u_defi = self.processDefi(b_defi, b_word)
self.defiMaxBytes = max(self.defiMaxBytes, len(b_defi))
pos += defiLen
return True, u_word, sorted(u_alts), u_defi
def charReferencesStat(self, b_text: bytes, encoding: str) -> None:
pass
@staticmethod
def decodeCharsetTagsBabylonReference(b_text: bytes, b_text2: bytes):
b_refs = b_text2.split(b";")
add_text = ""
for i_ref, b_ref in enumerate(b_refs):
if not b_ref:
if i_ref != len(b_refs) - 1:
log.debug(
f"decoding charset tags, b_text={b_text!r}"
"\nblank <charset c=t> character"
f" reference ({b_text2!r})\n",
)
continue
if not re_b_reference.match(b_ref):
log.debug(
f"decoding charset tags, b_text={b_text!r}"
"\ninvalid <charset c=t> character"
f" reference ({b_text2!r})\n",
)
continue
add_text += chr(int(b_ref, 16))
return add_text
def decodeCharsetTagsTextBlock(
self,
encoding: str,
b_text: bytes,
b_part: bytes,
) -> str:
b_text2 = b_part
if encoding == "babylon-reference":
return self.decodeCharsetTagsBabylonReference(b_text, b_text2)
self.charReferencesStat(b_text2, encoding)
if encoding == "cp1252":
b_text2 = replaceAsciiCharRefs(b_text2)
if self._strict_string_conversion:
try:
u_text2 = b_text2.decode(encoding)
except UnicodeError:
log.debug(
f"decoding charset tags, b_text={b_text!r}"
f"\nfragment: {b_text2!r}"
"\nconversion error:\n" + excMessage(),
)
u_text2 = b_text2.decode(encoding, "replace")
else:
u_text2 = b_text2.decode(encoding, "replace")
return u_text2
def decodeCharsetTags( # noqa: PLR0912
self,
b_text: bytes,
defaultEncoding: str,
) -> tuple[str, str]:
"""
b_text is a bytes
Decode html text taking into account charset tags and default encoding.
Return value: (u_text, defaultEncodingOnly)
u_text is str
defaultEncodingOnly parameter is false if the text contains parts
encoded with non-default encoding (babylon character references
'<CHARSET c="T">00E6;</CHARSET>' do not count).
"""
b_parts = re_charset_decode.split(b_text)
u_text = ""
encodings = [] # stack of encodings
defaultEncodingOnly = True
for i, b_part in enumerate(b_parts):
if i % 3 == 0: # text block
encoding = encodings[-1] if encodings else defaultEncoding
u_text += self.decodeCharsetTagsTextBlock(encoding, b_text, b_part)
if encoding != defaultEncoding:
defaultEncodingOnly = False
continue
if i % 3 == 1: # <charset...> or </charset>
if b_part.startswith(b"</"):
# </charset>
if encodings:
encodings.pop()
else:
log.debug(
f"decoding charset tags, b_text={b_text!r}"
"\nunbalanced </charset> tag\n",
)
continue
# <charset c="?">
b_type = b_parts[i + 1].lower()
# b_type is a bytes instance, with length 1
if b_type == b"t":
encodings.append("babylon-reference")
elif b_type == b"u":
encodings.append("utf-8")
elif b_type == b"k": # noqa: SIM114
encodings.append(self.sourceEncoding)
elif b_type == b"e":
encodings.append(self.sourceEncoding)
elif b_type == b"g":
# gbk or gb18030 encoding
# (not enough data to make distinction)
encodings.append("gbk")
else:
log.debug(
f"decoding charset tags, text = {b_text!r}"
f"\nunknown charset code = {ord(b_type):#02x}\n",
)
# add any encoding to prevent
# "unbalanced </charset> tag" error
encodings.append(defaultEncoding)
continue
# c attribute of charset tag if the previous tag was charset
if encodings:
log.debug(
f"decoding charset tags, text={b_text}\nunclosed <charset...> tag\n",
)
return u_text, defaultEncodingOnly
def processKey(self, b_word: bytes) -> tuple[str, str]:
"""
b_word is a bytes instance
returns (u_word: str, u_word_html: str)
u_word_html is empty unless it's different from u_word.
"""
b_word, strip_count = stripDollarIndexes(b_word)
if strip_count > 1:
log.debug(
f"processKey({b_word}):\nnumber of dollar indexes = {strip_count}",
)
# convert to unicode
if self._strict_string_conversion:
try:
u_word = b_word.decode(self.sourceEncoding)
except UnicodeError:
log.debug(
f"processKey({b_word}):\nconversion error:\n" + excMessage(),
)
u_word = b_word.decode(
self.sourceEncoding,
"ignore",
)
else:
u_word = b_word.decode(self.sourceEncoding, "ignore")
u_word_html = ""
if self._process_html_in_key:
u_word = replaceHtmlEntriesInKeys(u_word)
# u_word = u_word.replace("<BR>", "").replace("<BR/>", "")\
# .replace("<br>", "").replace("<br/>", "")
_u_word_copy = u_word
u_word = stripHtmlTags(u_word)
if u_word != _u_word_copy:
u_word_html = _u_word_copy
# if(re.match(".*[&<>].*", _u_word_copy)):
# log.debug("original text: " + _u_word_copy + "\n" \
# + "new text: " + u_word + "\n")
u_word = removeControlChars(u_word)
u_word = removeNewlines(u_word)
u_word = u_word.lstrip()
if self._key_rstrip_chars:
u_word = u_word.rstrip(self._key_rstrip_chars)
return u_word, u_word_html
def processAlternativeKey(self, b_word: bytes, b_key: bytes) -> str:
"""
b_word is a bytes instance
returns u_word_main, as str instance (utf-8 encoding).
"""
b_word_main, _strip_count = stripDollarIndexes(b_word)
# convert to unicode
if self._strict_string_conversion:
try:
u_word_main = b_word_main.decode(self.sourceEncoding)
except UnicodeError:
log.debug(
f"processAlternativeKey({b_word})\nkey = {b_key}"
":\nconversion error:\n" + excMessage(),
)
u_word_main = b_word_main.decode(self.sourceEncoding, "ignore")
else:
u_word_main = b_word_main.decode(self.sourceEncoding, "ignore")
# strip "/" before words
u_word_main = self.stripSlashAltKeyPattern.sub(
r"\1\2",
u_word_main,
)
if self._process_html_in_key:
# u_word_main_orig = u_word_main
u_word_main = stripHtmlTags(u_word_main)
u_word_main = replaceHtmlEntriesInKeys(u_word_main)
# if(re.match(".*[&<>].*", u_word_main_orig)):
# log.debug("original text: " + u_word_main_orig + "\n" \
# + "new text: " + u_word_main + "\n")
u_word_main = removeControlChars(u_word_main)
u_word_main = removeNewlines(u_word_main)
u_word_main = u_word_main.lstrip()
return u_word_main.rstrip(self._key_rstrip_chars)
# TODO: break it down
# PLR0912 Too many branches (20 > 12)
def processDefi(self, b_defi: bytes, b_key: bytes) -> str: # noqa: PLR0912
"""
b_defi: bytes
b_key: bytes.
return: u_defi_format
"""
fields = DefinitionFields()
self.collectDefiFields(b_defi, b_key, fields)
fields.u_defi, fields.singleEncoding = self.decodeCharsetTags(
fields.b_defi,
self.targetEncoding,
)
if fields.singleEncoding:
fields.encoding = self.targetEncoding
fields.u_defi = fixImgLinks(fields.u_defi)
fields.u_defi = replaceHtmlEntries(fields.u_defi)
fields.u_defi = removeControlChars(fields.u_defi)
fields.u_defi = normalizeNewlines(fields.u_defi)
fields.u_defi = fields.u_defi.strip()
if fields.b_title:
fields.u_title, _singleEncoding = self.decodeCharsetTags(
fields.b_title,
self.sourceEncoding,
)
fields.u_title = replaceHtmlEntries(fields.u_title)
fields.u_title = removeControlChars(fields.u_title)
if fields.b_title_trans:
# sourceEncoding or targetEncoding ?
fields.u_title_trans, _singleEncoding = self.decodeCharsetTags(
fields.b_title_trans,
self.sourceEncoding,
)
fields.u_title_trans = replaceHtmlEntries(fields.u_title_trans)
fields.u_title_trans = removeControlChars(fields.u_title_trans)
if fields.b_transcription_50:
if fields.code_transcription_50 == 0x10:
# contains values like this (char codes):
# 00 18 00 19 00 1A 00 1B 00 1C 00 1D 00 1E 00 40 00 07
# this is not utf-16
# what is this?
pass
elif fields.code_transcription_50 == 0x1B:
fields.u_transcription_50, _singleEncoding = self.decodeCharsetTags(
fields.b_transcription_50,
self.sourceEncoding,
)
fields.u_transcription_50 = replaceHtmlEntries(
fields.u_transcription_50,
)
fields.u_transcription_50 = removeControlChars(
fields.u_transcription_50,
)
elif fields.code_transcription_50 == 0x18:
# incomplete text like:
# t c=T>02D0;</charset>g<charset c=T>0259;</charset>-
# This defi normally contains fields.b_transcription_60
# in this case.
pass
else:
log.debug(
f"processDefi({b_defi})\nb_key = {b_key}"
":\ndefi field 50"
f", unknown code: {fields.code_transcription_50:#02x}",
)
if fields.b_transcription_60:
if fields.code_transcription_60 == 0x1B:
fields.u_transcription_60, _singleEncoding = self.decodeCharsetTags(
fields.b_transcription_60,
self.sourceEncoding,
)
fields.u_transcription_60 = replaceHtmlEntries(
fields.u_transcription_60,
)
fields.u_transcription_60 = removeControlChars(
fields.u_transcription_60,
)
else:
log.debug(
f"processDefi({b_defi})\nb_key = {b_key}"
":\ndefi field 60"
f", unknown code: {fields.code_transcription_60:#02x}",
)
if fields.b_field_1a:
fields.u_field_1a, _singleEncoding = self.decodeCharsetTags(
fields.b_field_1a,
self.sourceEncoding,
)
log.info(f"------- u_field_1a = {fields.u_field_1a}")
self.processDefiStat(fields, b_defi, b_key)
u_defi_format = ""
if fields.partOfSpeech or fields.u_title:
if fields.partOfSpeech:
pos = xml_escape(fields.partOfSpeech)
posColor = self._part_of_speech_color
u_defi_format += f'<font color="#{posColor}">{pos}</font>'
if fields.u_title:
if u_defi_format:
u_defi_format += " "
u_defi_format += fields.u_title
u_defi_format += "<br>\n"
if fields.u_title_trans:
u_defi_format += fields.u_title_trans + "<br>\n"
if fields.u_transcription_50:
u_defi_format += f"[{fields.u_transcription_50}]<br>\n"
if fields.u_transcription_60:
u_defi_format += f"[{fields.u_transcription_60}]<br>\n"
if fields.u_defi:
u_defi_format += fields.u_defi
return u_defi_format.removesuffix("<br>").removesuffix("<BR>")
def processDefiStat(
self,
fields: DefinitionFields,
b_defi: bytes,
b_key: bytes,
) -> None:
pass
def findDefiFieldsStart(self, b_defi: bytes) -> int:
r"""
Find the beginning of the definition trailing fields.
Return value is the index of the first chars of the field set,
or -1 if the field set is not found.
Normally "\x14" should signal the beginning of the definition fields,
		but some articles may contain this character inside, so we get a false
		match.
As a workaround we may check the following chars. If "\x14" is followed
by space, we assume this is part of the article and continue search.
		Unfortunately this does not help in many cases...
"""
if self._no_control_sequence_in_defi:
return -1
index = -1
while True:
index = b_defi.find(
0x14,
index + 1, # starting from next character
-1, # not the last character
)
if index == -1:
break
if b_defi[index + 1] != 0x20: # b" "[0] == 0x20
break
return index
# TODO: break it down
# PLR0912 Too many branches (41 > 12)
def collectDefiFields( # noqa: PLR0912
self,
b_defi: bytes,
b_key: bytes,
fields: DefinitionFields,
) -> None:
r"""
Entry definition structure:
<main definition>['\x14'[{field_code}{field_data}]*]
{field_code} is one character
{field_data} has arbitrary length.
"""
# d0 is index of the '\x14 char in b_defi
# d0 may be the last char of the string
d0 = self.findDefiFieldsStart(b_defi)
if d0 == -1:
fields.b_defi = b_defi
return
fields.b_defi = b_defi[:d0]
i = d0 + 1
while i < len(b_defi):
if self.metadata2:
self.metadata2.defiTrailingFields[b_defi[i]] += 1
if b_defi[i] == 0x02:
# part of speech # "\x02" <one char - part of speech>
if fields.partOfSpeech:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}"
":\nduplicate part of speech item",
)
if i + 1 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nb_defi ends after \\x02",
)
return
posCode = b_defi[i + 1]
try:
fields.partOfSpeech = partOfSpeechByCode[posCode]
except KeyError:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}"
f":\nunknown part of speech code = {posCode:#02x}",
)
return
i += 2
elif b_defi[i] == 0x06: # \x06<one byte>
if fields.b_field_06:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nduplicate type 6",
)
if i + 1 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nb_defi ends after \\x06",
)
return
fields.b_field_06 = b_defi[i + 1]
i += 2
elif b_defi[i] == 0x07: # \x07<two bytes>
# Found in 4 Hebrew dictionaries. I do not understand.
if i + 3 > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x07",
)
return
fields.b_field_07 = b_defi[i + 1 : i + 3]
i += 3
elif b_defi[i] == 0x13: # "\x13"<one byte - length><data>
# known values:
# 03 06 0D C7
# 04 00 00 00 44
# ...
# 04 00 00 00 5F
if i + 1 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x13",
)
return
Len = b_defi[i + 1]
i += 2
if Len == 0:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}\n"
f"b_key = {b_key!r}:\nblank data after \\x13",
)
continue
if i + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}\n"
f"b_key = {b_key!r}:\ntoo few data after \\x13",
)
return
fields.b_field_13 = b_defi[i : i + Len]
i += Len
elif b_defi[i] == 0x18:
# \x18<one byte - title length><entry title>
if fields.b_title:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"b_key = {b_key!r}:\nduplicate entry title item",
)
if i + 1 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}\n"
f"b_key = {b_key!r}:\nb_defi ends after \\x18",
)
return
i += 1
Len = b_defi[i]
i += 1
if Len == 0:
# log.debug(
# f"collecting definition fields, b_defi = {b_defi!r}\n"
# f"b_key = {b_key!r}:\nblank entry title"
# )
continue
if i + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}\n"
f"b_key = {b_key!r}:\ntitle is too long",
)
return
fields.b_title = b_defi[i : i + Len]
i += Len
elif b_defi[i] == 0x1A: # "\x1a"<one byte - length><text>
# found only in Hebrew dictionaries, I do not understand.
if i + 1 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key}:\ntoo few data after \\x1a",
)
return
Len = b_defi[i + 1]
i += 2
if Len == 0:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nblank data after \\x1a",
)
continue
if i + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x1a",
)
return
fields.b_field_1a = b_defi[i : i + Len]
i += Len
elif b_defi[i] == 0x28: # "\x28" <two bytes - length><html text>
# title with transcription?
if i + 2 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x28",
)
return
i += 1
Len = uintFromBytes(b_defi[i : i + 2])
i += 2
if Len == 0:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nblank data after \\x28",
)
continue
if i + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x28",
)
return
fields.b_title_trans = b_defi[i : i + Len]
i += Len
elif 0x40 <= b_defi[i] <= 0x4F: # [\x41-\x4f] <one byte> <text>
# often contains digits as text:
# 56
# ælps - key Alps
# 48@i
# has no apparent influence on the article
code = b_defi[i]
Len = b_defi[i] - 0x3F
if i + 2 + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x40+",
)
return
i += 2
b_text = b_defi[i : i + Len]
i += Len
log.debug(
f"unknown definition field {code:#02x}, b_text={b_text!r}",
)
elif b_defi[i] == 0x50:
# \x50 <one byte> <one byte - length><data>
if i + 2 >= len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x50",
)
return
fields.code_transcription_50 = b_defi[i + 1]
Len = b_defi[i + 2]
i += 3
if Len == 0:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nblank data after \\x50",
)
continue
if i + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x50",
)
return
fields.b_transcription_50 = b_defi[i : i + Len]
i += Len
elif b_defi[i] == 0x60:
# "\x60" <one byte> <two bytes - length> <text>
if i + 4 > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x60",
)
return
fields.code_transcription_60 = b_defi[i + 1]
i += 2
Len = uintFromBytes(b_defi[i : i + 2])
i += 2
if Len == 0:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\nblank data after \\x60",
)
continue
if i + Len > len(b_defi):
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}:\ntoo few data after \\x60",
)
return
fields.b_transcription_60 = b_defi[i : i + Len]
i += Len
else:
log.debug(
f"collecting definition fields, b_defi = {b_defi!r}"
f"\nb_key = {b_key!r}"
f":\nunknown control char. Char code = {b_defi[i]:#02x}",
)
return
| size: 48,114 | language: Python | extension: .py | total_lines: 1,531 | avg_line_length: 27.504246 | max_line_length: 84 | alphanum_fraction: 0.66877 | repo_name: ilius/pyglossary | repo_stars: 2,176 | repo_forks: 238 | repo_open_issues: 22 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| id: 6,879 | file_name: bgl_gzip.py | file_path: ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_gzip.py | content: |
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed.
"""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import logging
import builtins
import io
import os
import struct
import time
import zlib
import _compression
__all__ = ["BadGzipFile", "GzipFile"]
log = logging.getLogger('root')
_FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
_COMPRESS_LEVEL_FAST = 1
_COMPRESS_LEVEL_TRADEOFF = 6
_COMPRESS_LEVEL_BEST = 9
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
class _PaddedFile:
"""Minimal read-only file object that prepends a string to the contents
of an actual file. Shouldn't be used outside of gzip.py, as it lacks
essential functionality.
"""
def __init__(self, f, prepend=b''):
self._buffer = prepend
self._length = len(prepend)
self.file = f
self._read = 0
def read(self, size):
if self._read is None:
return self.file.read(size)
if self._read + size <= self._length:
read = self._read
self._read += size
return self._buffer[read:self._read]
read = self._read
self._read = None
return self._buffer[read:] + self.file.read(
size - self._length + read,
)
def prepend(self, prepend=b''):
if self._read is None:
self._buffer = prepend
else: # Assume data was read since the last prepend() call
self._read -= len(prepend)
return
self._length = len(self._buffer)
self._read = 0
def seek(self, off):
self._read = None
self._buffer = None
return self.file.seek(off)
def seekable(self):
# to comply with io.IOBase
return True # Allows fast-forwarding even in unseekable streams
class BadGzipFile(OSError):
"""Exception raised in some cases for invalid gzip files."""
class GzipFile(_compression.BaseStream):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the truncate() method.
This class only supports opening files in binary mode. If you need to open a
compressed file in text mode, use the gzip.open() function.
"""
# Overridden with internal file object to be closed, if only a filename
# is passed in
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=_COMPRESS_LEVEL_BEST, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, an io.BytesIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
'xb' depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
'wb', 'a' and 'ab', and 'x' and 'xb'.
The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. 0 is no compression
at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the last modification time field in the stream when compressing.
If omitted or None, the current time is used.
"""
if mode and ('t' in mode or 'U' in mode):
raise ValueError(f"Invalid mode: {mode!r}")
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
if filename is None:
filename = getattr(fileobj, 'name', '')
if not isinstance(filename, (str, bytes)):
filename = ''
else:
filename = os.fspath(filename)
origmode = mode
if mode is None:
mode = getattr(fileobj, 'mode', 'rb')
if mode.startswith('r'):
self.mode = READ
raw = _GzipReader(fileobj)
self._buffer = io.BufferedReader(raw)
self.name = filename
elif mode.startswith(('w', 'a', 'x')):
if origmode is None:
import warnings
warnings.warn(
"GzipFile was opened for writing, but this will "
"change in future Python releases. "
"Specify the mode argument for opening it for writing.",
FutureWarning, 2)
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._write_mtime = mtime
else:
raise ValueError(f"Invalid mode: {mode!r}")
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header(compresslevel)
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
@property
def mtime(self):
"""Last modification time read from stream, or None."""
return self._buffer.raw._last_mtime
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32(b"")
self.size = 0
self.offset = 0 # Current file offset for seek(), tell(), etc
def _write_gzip_header(self, compresslevel):
self.fileobj.write(b'\037\213') # magic header
self.fileobj.write(b'\010') # compression method
try:
# RFC 1952 requires the FNAME field to be Latin-1. Do not
# include filenames that cannot be represented that way.
fname = os.path.basename(self.name)
if not isinstance(fname, bytes):
fname = fname.encode('latin-1')
if fname.endswith(b'.gz'):
fname = fname[:-3]
except UnicodeEncodeError:
fname = b''
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags).encode('latin-1'))
mtime = self._write_mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, int(mtime))
if compresslevel == _COMPRESS_LEVEL_BEST:
xfl = b'\002'
elif compresslevel == _COMPRESS_LEVEL_FAST:
xfl = b'\004'
else:
xfl = b'\000'
self.fileobj.write(xfl)
self.fileobj.write(b'\377')
if fname:
self.fileobj.write(fname + b'\000')
def write(self, data):
self._check_not_closed()
if self.mode != WRITE:
import errno
raise OSError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError("write() on closed GzipFile object")
if isinstance(data, (bytes, bytearray)):
length = len(data)
else:
# accept any data that supports the buffer protocol
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(self.compress.compress(data))
self.size += length
self.crc = zlib.crc32(data, self.crc)
self.offset += length
return length
def read(self, size=-1):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read() on write-only GzipFile object")
return self._buffer.read(size)
def read1(self, size=-1):
"""Implements BufferedIOBase.read1().
Reads up to a buffer's worth of data if size is negative.
"""
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read1() on write-only GzipFile object")
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def peek(self, n):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "peek() on write-only GzipFile object")
return self._buffer.peek(n)
@property
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj = None
try:
if self.mode == WRITE:
fileobj.write(self.compress.flush())
write32u(fileobj, self.crc)
# self.size may exceed 2 GiB, or even 4 GiB
write32u(fileobj, self.size & 0xffffffff)
elif self.mode == READ:
self._buffer.close()
finally:
myfileobj = self.myfileobj
if myfileobj:
self.myfileobj = None
myfileobj.close()
def flush(self, zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_not_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file.
'''
if self.mode != READ:
raise OSError("Can't rewind in write mode")
self._buffer.seek(0)
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=io.SEEK_SET):
if self.mode == WRITE:
if whence != io.SEEK_SET:
if whence == io.SEEK_CUR:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if offset < self.offset:
raise OSError('Negative seek in write mode')
count = offset - self.offset
chunk = b'\0' * 1024
for _ in range(count // 1024):
self.write(chunk)
self.write(b'\0' * (count % 1024))
elif self.mode == READ:
self._check_not_closed()
return self._buffer.seek(offset, whence)
return self.offset
def readline(self, size=-1):
self._check_not_closed()
return self._buffer.readline(size)
def _read_exact(fp, n):
'''Read exactly *n* bytes from `fp`.
This method is required because fp may be unbuffered,
i.e. return short reads.
'''
data = fp.read(n)
while len(data) < n:
b = fp.read(n - len(data))
if not b:
raise EOFError("Compressed file ended before the "
"end-of-stream marker was reached")
data += b
return data
def _read_gzip_header(fp):
'''Read a gzip header from `fp` and progress to the end of the header.
Returns last mtime if header was present or None otherwise.
'''
magic = fp.read(2)
if magic == b'':
return None
if magic != b'\037\213':
raise BadGzipFile(f'Not a gzipped file ({magic!r})')
(method, flag, last_mtime) = struct.unpack("<BBIxx", _read_exact(fp, 8))
if method != 8:
raise BadGzipFile('Unknown compression method')
if flag & FEXTRA:
# Read & discard the extra field, if present
extra_len, = struct.unpack("<H", _read_exact(fp, 2))
_read_exact(fp, extra_len)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = fp.read(1)
if not s or s == b'\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = fp.read(1)
if not s or s == b'\000':
break
if flag & FHCRC:
_read_exact(fp, 2) # Read & discard the 16-bit header CRC
return last_mtime
class _GzipReader(_compression.DecompressReader):
def __init__(self, fp):
super().__init__(_PaddedFile(fp), zlib.decompressobj,
wbits=-zlib.MAX_WBITS)
# Set flag indicating start of a new member
self._new_member = True
self._last_mtime = None
def _init_read(self):
self._crc = zlib.crc32(b"")
self._stream_size = 0 # Decompressed size of unconcatenated stream
def _read_gzip_header(self):
last_mtime = _read_gzip_header(self._fp)
if last_mtime is None:
return False
self._last_mtime = last_mtime
return True
def read(self, size=-1):
if size < 0:
return self.readall()
# size=0 is special because decompress(max_length=0) is not supported
if not size:
return b""
# For certain input data, a single
# call to decompress() may not return
# any data. In this case, retry until we get some data or reach EOF.
while True:
if self._decompressor.eof:
# Ending case: we've come to the end of a member in the file,
# so finish up this member, and read a new gzip header.
# Check the CRC and file size, and set the flag so we read
# a new member
self._read_eof()
self._new_member = True
self._decompressor = self._decomp_factory(
**self._decomp_args)
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
self._init_read()
if not self._read_gzip_header():
self._size = self._pos
return b""
self._new_member = False
# Read a chunk of data from the file
buf = self._fp.read(io.DEFAULT_BUFFER_SIZE)
uncompress = self._decompressor.decompress(buf, size)
if self._decompressor.unconsumed_tail != b"":
self._fp.prepend(self._decompressor.unconsumed_tail)
elif self._decompressor.unused_data != b"":
# Prepend the already read bytes to the fileobj so they can
# be seen by _read_eof() and _read_gzip_header()
self._fp.prepend(self._decompressor.unused_data)
if uncompress != b"":
break
if buf == b"":
raise EOFError(
"Compressed file ended before the "
"end-of-stream marker was reached"
)
self._add_read_data(uncompress)
self._pos += len(uncompress)
return uncompress
def _add_read_data(self, data):
self._crc = zlib.crc32(data, self._crc)
self._stream_size = self._stream_size + len(data)
def _read_eof(self):
# We've read to the end of the file
# We check that the computed CRC and size of the
# uncompressed data matches the stored values. Note that the size
# stored is the true file size mod 2**32.
crc32, isize = struct.unpack("<II", _read_exact(self._fp, 8))
if crc32 != self._crc:
log.warning(f"CRC check failed {hex(crc32)} != {hex(self._crc)}")
elif isize != (self._stream_size & 0xffffffff):
raise BadGzipFile("Incorrect length of data produced")
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = b"\x00"
while c == b"\x00":
c = self._fp.read(1)
if c:
self._fp.prepend(c)
def _rewind(self):
super()._rewind()
self._new_member = True
| size: 17,571 | language: Python | extension: .py | total_lines: 429 | avg_line_length: 30.582751 | max_line_length: 80 | alphanum_fraction: 0.577611 | repo_name: ilius/pyglossary | repo_stars: 2,176 | repo_forks: 238 | repo_open_issues: 22 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| id: 6,880 | file_name: bgl_pos.py | file_path: ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_pos.py | content: |
# -*- coding: utf-8 -*-
#
# Copyright © 2008-2020 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2011-2012 kubtek <kubtek@gmail.com>
# This file is part of PyGlossary project, http://github.com/ilius/pyglossary
# Thanks to Raul Fernandes <rgfbr@yahoo.com.br> and Karl Grill for reverse
# engineering as part of https://sourceforge.net/projects/ktranslator/
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
__all__ = ["partOfSpeechByCode"]
partOfSpeechByCode = {
# Use None for codes we have not seen yet
# Use "" for codes we've seen but part of speech is unknown
0x30: "noun",
0x31: "adjective",
0x32: "verb",
0x33: "adverb",
0x34: "interjection",
0x35: "pronoun",
0x36: "preposition",
0x37: "conjunction",
0x38: "suffix",
0x39: "prefix",
0x3A: "article",
0x3B: "", # in Babylon Italian-English.BGL,
# Babylon Spanish-English.BGL,
# Babylon_Chinese_S_English.BGL
# no indication of the part of speech
0x3C: "abbreviation",
# (short form: 'ר"ת')
# (full form: "ר"ת: ראשי תיבות")
# "ת'"
# adjective
# (full form: "ת': תואר")
# "ש"ע"
# noun
# (full form: "ש"ע: שם עצם")
0x3D: "masculine noun and adjective",
0x3E: "feminine noun and adjective",
0x3F: "masculine and feminine noun and adjective",
0x40: "feminine noun",
# (short form: "נ\'")
# (full form: "נ': נקבה")
0x41: "masculine and feminine noun",
# 0x41: noun that may be used as masculine and feminine
# (short form: "זו"נ")
# (full form: "זו"נ: זכר ונקבה")
0x42: "masculine noun",
# (short form: 'ז\'')
# (full form: "ז': זכר")
0x43: "numeral",
0x44: "participle",
0x45: None,
0x46: None,
0x47: None,
}
| size: 2,348 | language: Python | extension: .py | total_lines: 69 | avg_line_length: 31.550725 | max_line_length: 78 | alphanum_fraction: 0.716344 | repo_name: ilius/pyglossary | repo_stars: 2,176 | repo_forks: 238 | repo_open_issues: 22 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| id: 6,881 | file_name: bgl_language.py | file_path: ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_language.py | content: |
# -*- coding: utf-8 -*-
#
# Copyright © 2008-2020 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2011-2012 kubtek <kubtek@gmail.com>
# This file is part of PyGlossary project, http://github.com/ilius/pyglossary
# Thanks to Raul Fernandes <rgfbr@yahoo.com.br> and Karl Grill for reverse
# engineering as part of https://sourceforge.net/projects/ktranslator/
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
"""
language properties.
In this short note we describe how Babylon select encoding for key words,
alternates and definitions.
There are source and target encodings. The source encoding is used to
encode keys and alternates, the target encoding is used to encode
definitions.
The source encoding is selected based on the source language of the
dictionary, the target encoding is tied to the target language.
Babylon Glossary Builder allows you to specify source and target languages.
If you open a Builder project (a file with .gpr extension) in a text
editor, you should find the following elements:
<bab:SourceCharset>Latin</bab:SourceCharset>
<bab:SourceLanguage>English</bab:SourceLanguage>
<bab:TargetCharset>Latin</bab:TargetCharset>
<bab:TargetLanguage>English</bab:TargetLanguage>
Here bab:SourceLanguage is the source language that you select in the
builder wizard, bab:SourceCharset - is the corresponding charset.
bab:TargetLanguage - target language, bab:TargetCharset - corresponding
charset.
Unfortunately, builder does not tell us what encoding corresponds to charset,
but we can detect it.
A few words about how definitions are encoded. If all chars of the
definition fall into the target charset, Babylon uses that charset to encode
the definition. If at least one char does not fall into the target charset,
Babylon uses utf-8 encoding, wrapping the definition into <charset c=U> and
</charset> tags.
You can make Babylon use utf-8 encoding for the whole dictionary; in that case
all definitions, keys and alternates are encoded with utf-8. See Babylon
Glossary Builder wizard, Glossary Properties tab, Advanced button, Use UTF-8
encoding check box. Definitions are not augmented with extra markup in this
case, that is, you will not find charset tags in definitions.
How can you tell what encoding was used for a particular definition in a
.bgl file? You need to check the following conditions.
Block type 3, code 0x11. If the 0x8000 bit is set, the whole dictionary uses
utf-8 encoding.
If the definition starts with <charset c=U>, that definition uses utf-8
encoding.
Otherwise you need to consult the target encoding.
Block type 3, code 0x1b. That field normally contains a 1-byte code of the
target encoding. Codes fill the range of 0x41 to 0x4e. Babylon Builder
generates codes 0x42 - 0x4e. How to generate code 0x41?
Occasionally you may find that the field value is four zero bytes. In this
case, I guess, the default encoding for the target language is used.
Block type 3, code 0x08. That field contains a 4-byte code of the target
language. The first three bytes are always zero, the last byte is the code.
Playing with Babylon Glossary builder we can find language codes corresponding
to target language. The language codes fill the range of 0 to 0x3d.
How to detect the target encoding? Here is the technique I've used.
- Create a babylon glossary source file ( a file with .gls extension) with
the following contents. Start the file with utf-8 BOM for the builder
to recognize the utf-8 encoding. Use unicode code point code as key,
and a single unicode char encoded in utf-8 as the definition. Create keys
for all code points in the range 32 - 0x10000, or you may use a wider range.
We do not use code points in the range 0-31, since they are control chars.
You should skip the following three chars: & < >. Since the definition
is supposed to contain html, these chars must be replaced by &amp;, &lt; and
&gt; respectively. You should skip the char $ as well; it has a special
meaning in definitions (?). Skip all code points that cannot be encoded in
utf-8 (not all code points in the range 32-0x10000 represent valid chars).
- Now that you have a glossary source file, process it with builder selecting
the desired target language. Make sure the "use utf-8" option is not set.
You'll get a .bgl file.
- Process the generated .bgl file with pyglossary. Skip all definitions that
start with <charset c=U> tag. Try to decode definitions using different
encodings and match the result with the real value (key - code point char
code). Thus you'll find the encoding having the best match.
For example, you may do the following.
Loop over all available encodings, loop over all definitions in the
dictionary. Count the number of definitions that do not start with a
charset tag - total. Among them count the number of definitions that were
correctly decoded - success. The encoding where total == success is
the target encoding.
There are a few problems I encountered. It looks like python does not
correctly implement cp932 and cp950 encodings. For Japanese charset I
got 99.12% match, and for Traditional Chinese charset I got even less -
66.97%. To confirm my guess that Japanese is cp932 and Traditional Chinese
is cp950 I built a C++ utility that worked on the data extracted from .bgl
dictionary. I used WideCharToMultiByte function for conversion. The C++
utility confirmed the cp932 and cp950 encodings, I got 100% match.
"""
from dataclasses import dataclass
__all__ = ["BabylonLanguage", "languageByCode"]
@dataclass(slots=True, frozen=True)
class BabylonLanguage:
"""
Babylon language properties.
name: bab:SourceLanguage, bab:TargetLanguage .gpr tags (English, French, Japanese)
charset: bab:SourceCharset, bab:TargetCharset .gpr tags (Latin, Arabic, Cyrillic)
encoding: Windows code page (cp1250, cp1251, cp1252)
code: value of the type 3, code in .bgl file
"""
name: str
charset: str
encoding: str
code: int
code2: str = ""
name2: str = ""
languages = (
BabylonLanguage(
name="English",
charset="Latin",
encoding="cp1252",
code=0x00,
code2="en",
),
BabylonLanguage(
name="French",
charset="Latin",
encoding="cp1252",
code=0x01,
code2="fr",
),
BabylonLanguage(
name="Italian",
charset="Latin",
encoding="cp1252",
code=0x02,
code2="it",
),
BabylonLanguage(
name="Spanish",
charset="Latin",
encoding="cp1252",
code=0x03,
code2="es",
),
BabylonLanguage(
name="Dutch",
charset="Latin",
encoding="cp1252",
code=0x04,
code2="nl",
),
BabylonLanguage(
name="Portuguese",
charset="Latin",
encoding="cp1252",
code=0x05,
code2="pt",
),
BabylonLanguage(
name="German",
charset="Latin",
encoding="cp1252",
code=0x06,
code2="de",
),
BabylonLanguage(
name="Russian",
charset="Cyrillic",
encoding="cp1251",
code=0x07,
code2="ru",
),
BabylonLanguage(
name="Japanese",
charset="Japanese",
encoding="cp932",
code=0x08,
code2="ja",
),
BabylonLanguage(
name="Chinese",
name2="Traditional Chinese",
charset="Traditional Chinese",
encoding="cp950",
code=0x09,
code2="zh",
),
BabylonLanguage(
name="Chinese",
name2="Simplified Chinese",
charset="Simplified Chinese",
encoding="cp936",
code=0x0A,
code2="zh",
),
BabylonLanguage(
name="Greek",
charset="Greek",
encoding="cp1253",
code=0x0B,
code2="el",
),
BabylonLanguage(
name="Korean",
charset="Korean",
encoding="cp949",
code=0x0C,
code2="ko",
),
BabylonLanguage(
name="Turkish",
charset="Turkish",
encoding="cp1254",
code=0x0D,
code2="tr",
),
BabylonLanguage(
name="Hebrew",
charset="Hebrew",
encoding="cp1255",
code=0x0E,
code2="he",
),
BabylonLanguage(
name="Arabic",
charset="Arabic",
encoding="cp1256",
code=0x0F,
code2="ar",
),
BabylonLanguage(
name="Thai",
charset="Thai",
encoding="cp874",
code=0x10,
code2="th",
),
BabylonLanguage(
name="Other",
charset="Latin",
encoding="cp1252",
code=0x11,
code2="", # none
),
BabylonLanguage(
name="Chinese",
name2="Other Simplified Chinese dialects",
charset="Simplified Chinese",
encoding="cp936",
code=0x12,
code2="zh", # duplicate
),
BabylonLanguage(
name="Chinese",
name2="Other Traditional Chinese dialects",
charset="Traditional Chinese",
encoding="cp950",
code=0x13,
code2="zh", # duplicate
),
BabylonLanguage(
name="Other Eastern-European languages",
charset="Eastern European",
encoding="cp1250",
code=0x14,
code2="", # none
),
BabylonLanguage(
name="Other Western-European languages",
charset="Latin",
encoding="cp1252",
code=0x15,
code2="", # none
),
BabylonLanguage(
name="Other Russian languages",
charset="Cyrillic",
encoding="cp1251",
code=0x16,
code2="", # none
),
BabylonLanguage(
name="Other Japanese languages",
charset="Japanese",
encoding="cp932",
code=0x17,
code2="", # none
),
BabylonLanguage(
name="Other Baltic languages",
charset="Baltic",
encoding="cp1257",
code=0x18,
code2="bat", # no 2-letter code
),
BabylonLanguage(
name="Other Greek languages",
charset="Greek",
encoding="cp1253",
code=0x19,
code2="", # none
),
BabylonLanguage(
name="Other Korean dialects",
charset="Korean",
encoding="cp949",
code=0x1A,
code2="", # none
),
BabylonLanguage(
name="Other Turkish dialects",
charset="Turkish",
encoding="cp1254",
code=0x1B,
code2="", # none
),
BabylonLanguage(
name="Other Thai dialects",
charset="Thai",
encoding="cp874",
code=0x1C,
code2="tai", # no 2-letter code, and "tha" / "th" is for "Thai"
),
BabylonLanguage(
name="Polish",
charset="Eastern European",
encoding="cp1250",
code=0x1D,
code2="pl",
),
BabylonLanguage(
name="Hungarian",
charset="Eastern European",
encoding="cp1250",
code=0x1E,
code2="hu",
),
BabylonLanguage(
name="Czech",
charset="Eastern European",
encoding="cp1250",
code=0x1F,
code2="cs",
),
BabylonLanguage(
name="Lithuanian",
charset="Baltic",
encoding="cp1257",
code=0x20,
code2="lt",
),
BabylonLanguage(
name="Latvian",
charset="Baltic",
encoding="cp1257",
code=0x21,
code2="lv",
),
BabylonLanguage(
name="Catalan",
charset="Latin",
encoding="cp1252",
code=0x22,
code2="ca",
),
BabylonLanguage(
name="Croatian",
charset="Eastern European",
encoding="cp1250",
code=0x23,
code2="hr",
),
BabylonLanguage(
name="Serbian",
charset="Eastern European",
encoding="cp1250",
code=0x24,
code2="sr",
),
BabylonLanguage(
name="Slovak",
charset="Eastern European",
encoding="cp1250",
code=0x25,
code2="sk",
),
BabylonLanguage(
name="Albanian",
charset="Latin",
encoding="cp1252",
code=0x26,
code2="sq",
),
BabylonLanguage(
name="Urdu",
charset="Arabic",
encoding="cp1256",
code=0x27,
code2="ur",
),
BabylonLanguage(
name="Slovenian",
charset="Eastern European",
encoding="cp1250",
code=0x28,
code2="sl",
),
BabylonLanguage(
name="Estonian",
charset="Latin",
encoding="cp1252",
code=0x29,
code2="et",
),
BabylonLanguage(
name="Bulgarian",
charset="Eastern European",
encoding="cp1250",
code=0x2A,
code2="bg",
),
BabylonLanguage(
name="Danish",
charset="Latin",
encoding="cp1252",
code=0x2B,
code2="da",
),
BabylonLanguage(
name="Finnish",
charset="Latin",
encoding="cp1252",
code=0x2C,
code2="fi",
),
BabylonLanguage(
name="Icelandic",
charset="Latin",
encoding="cp1252",
code=0x2D,
code2="is",
),
BabylonLanguage(
name="Norwegian",
charset="Latin",
encoding="cp1252",
code=0x2E,
code2="no",
),
BabylonLanguage(
name="Romanian",
charset="Latin",
encoding="cp1252",
code=0x2F,
code2="ro",
),
BabylonLanguage(
name="Swedish",
charset="Latin",
encoding="cp1252",
code=0x30,
code2="sv",
),
BabylonLanguage(
name="Ukrainian",
charset="Cyrillic",
encoding="cp1251",
code=0x31,
code2="uk",
),
BabylonLanguage(
name="Belarusian",
charset="Cyrillic",
encoding="cp1251",
code=0x32,
code2="be",
),
BabylonLanguage(
name="Persian", # aka "Farsi"
charset="Arabic",
encoding="cp1256",
code=0x33,
code2="fa",
),
BabylonLanguage(
name="Basque",
charset="Latin",
encoding="cp1252",
code=0x34,
code2="eu",
),
BabylonLanguage(
name="Macedonian",
charset="Eastern European",
encoding="cp1250",
code=0x35,
code2="mk",
),
BabylonLanguage(
name="Afrikaans",
charset="Latin",
encoding="cp1252",
code=0x36,
code2="af",
),
BabylonLanguage(
# Babylon Glossary Builder spells this language "Faeroese"
name="Faroese",
charset="Latin",
encoding="cp1252",
code=0x37,
code2="fo",
),
BabylonLanguage(
name="Latin",
charset="Latin",
encoding="cp1252",
code=0x38,
code2="la",
),
BabylonLanguage(
name="Esperanto",
charset="Turkish",
encoding="cp1254",
code=0x39,
code2="eo",
),
BabylonLanguage(
name="Tamazight",
# aka "Standard Moroccan Tamazight", "Standard Moroccan Berber"
# or "Standard Moroccan Amazigh"
charset="Latin",
encoding="cp1252",
code=0x3A,
code2="zgh", # no 2-letter code (ISO 639-1)
),
BabylonLanguage(
name="Armenian",
charset="Latin",
encoding="cp1252",
code=0x3B,
code2="hy",
),
BabylonLanguage(
name="Hindi",
charset="Latin",
encoding="cp1252",
code=0x3C,
code2="hi",
),
BabylonLanguage(
name="Somali",
charset="Latin",
encoding="cp1252",
code=0x3D,
code2="so",
),
)
languageByCode = {lang.code: lang for lang in languages}
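# Illustrative (hypothetical) usage: the language field of a type 3, code 0x08
# block is 4 bytes, and only the last byte carries the code, e.g.
#     b_value = b"\x00\x00\x00\x07"
#     lang = languageByCode[b_value[3]]  # BabylonLanguage(name="Russian", ...)
#     lang.encoding  # "cp1251"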
| 14,129 | Python | .py | 563 | 22.648313 | 83 | 0.736248 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,882 | bgl_reader_debug.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_reader_debug.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
#
# Copyright © 2008-2021 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
import gzip
import os
import re
from os.path import join
from pyglossary.core import log
from pyglossary.text_utils import (
toStr,
uintFromBytes,
)
from .bgl_reader import BGLGzipFile, BglReader, Block, FileOffS, tmpDir
def isASCII(data: str) -> bool:
for c in data: # noqa: SIM110
if ord(c) >= 128:
return False
return True
class MetaData:
def __init__(self) -> None:
self.blocks = []
self.numEntries = None
self.numBlocks = None
self.numFiles = None
self.gzipStartOffset = None
self.gzipEndOffset = None
self.fileSize = None
self.bglHeader = None # data before gzip header
class MetaDataBlock:
def __init__(self, data: bytes, _type: str) -> None:
self.data = data
self.type = _type
class MetaDataRange:
def __init__(self, _type: str, count: int) -> None:
self.type = _type
self.count = count
class MetaData2:
"""
Second pass metadata.
We need to scan all definitions in order to collect these
statistical data.
"""
def __init__(self) -> None:
# defiTrailingFields[i] - number of fields with code i found
self.defiTrailingFields = [0] * 256
self.isDefiASCII = True
# isDefiASCII = true if all definitions contain only ASCII chars
"""
We apply a number of tests to each definition, excluding those with
overwritten encoding (they start with <charset c=U>).
defiProcessedCount - total number of definitions processed
defiUtf8Count - number of definitions in utf8 encoding
defiAsciiCount - number of definitions containing only ASCII chars
"""
self.defiProcessedCount = 0
self.defiUtf8Count = 0
self.defiAsciiCount = 0
self.charRefs = {} # encoding -> [ 0 ] * 257
class GzipWithCheck:
"""
gzip.GzipFile with check.
It checks that unpacked data match what was packed.
"""
def __init__(
self,
fileobj,
unpackedPath,
reader,
closeFileobj=False,
) -> None:
"""
constructor.
fileobj - gzip file - archive
unpackedPath - path of a file containing original data, for testing.
reader - reference to BglReader class instance, used for logging.
"""
self.file = BGLGzipFile(
fileobj=fileobj,
closeFileobj=closeFileobj,
)
self.unpackedFile = open(unpackedPath, "rb") # noqa: SIM115
self.reader = reader
def __del__(self) -> None:
self.close()
def close(self) -> None:
if self.file:
self.file.close()
self.file = None
if self.unpackedFile:
self.unpackedFile.close()
self.unpackedFile = None
def read(self, size=-1):
buf1 = self.file.read(size)
buf2 = self.unpackedFile.read(size)
if buf1 != buf2:
self.reader.msgLogFileWrite(
f"GzipWithCheck.read: !=: size = {buf1}, ({buf2}) ({size})",
)
# else:
# self.reader.msgLogFileWrite(
# f"GzipWithCheck.read: ==: size = {buf1}, ({buf2}) ({size})",
# )
return buf1
def seek(self, offset, whence=os.SEEK_SET):
self.file.seek(offset, whence)
self.unpackedFile.seek(offset, whence)
# self.reader.msgLogFileWrite(
# f"GzipWithCheck.seek: offset = {offset}, whence = {whence}",
# )
def tell(self):
pos1 = self.file.tell()
pos2 = self.unpackedFile.tell()
if pos1 != pos2:
self.reader.msgLogFileWrite(
f"GzipWithCheck.tell: !=: {pos1} {pos2}",
)
# else:
# self.reader.msgLogFileWrite(
# f"GzipWithCheck.tell: ==: {pos1} {pos2}",
# )
return pos1
def flush(self):
if os.sep == "\\":
pass
# a bug in Windows
# after file.flush, file.read returns garbage
else:
self.file.flush()
self.unpackedFile.flush()
class DebugBglReader(BglReader):
_collect_metadata2: bool = False
_search_char_samples: bool = False
_write_gz: bool = False
_raw_dump_path: str = ""
_unpacked_gzip_path: str = ""
_char_samples_path: str = ""
_msg_log_path: str = ""
def open(
self,
filename,
):
if not BglReader.open(self, filename):
return
self.metadata2 = MetaData2() if self._collect_metadata2 else None
if self._search_char_samples:
self.targetCharsArray = [False] * 256
else:
self.targetCharsArray = None
if self._raw_dump_path:
self.rawDumpFile = open(self._raw_dump_path, "w", encoding="utf-8")
if self._char_samples_path:
self.samplesDumpFile = open(self._char_samples_path, "w", encoding="utf-8")
if self._msg_log_path:
self.msgLogFile = open(self._msg_log_path, "w", encoding="utf-8")
self.charRefStatPattern = re.compile(b"(&#\\w+;)", re.IGNORECASE)
def openGzip(self):
with open(self._filename, "rb") as bglFile:
if not bglFile:
log.error(f"file pointer empty: {bglFile}")
return False
buf = bglFile.read(6)
if len(buf) < 6 or buf[:4] not in {
b"\x12\x34\x00\x01",
b"\x12\x34\x00\x02",
}:
log.error(f"invalid header: {buf[:6]!r}")
return False
self.gzipOffset = gzipOffset = uintFromBytes(buf[4:6])
log.debug(f"Position of gz header: {gzipOffset}")
if gzipOffset < 6:
log.error(f"invalid gzip header position: {gzipOffset}")
return False
if self._write_gz:
self.dataFile = self._filename + "-data.gz"
try:
f2 = open(self.dataFile, "wb")
except OSError:
log.exception("error while opening gzip data file")
self.dataFile = join(
tmpDir,
os.path.split(self._filename)[-1] + "-data.gz",
)
f2 = open(self.dataFile, "wb")
bglFile.seek(gzipOffset)
f2.write(bglFile.read())
f2.close()
self.file = gzip.open(self.dataFile, "rb")
return None
f2 = FileOffS(self._filename, gzipOffset)
if self._unpacked_gzip_path:
self.file = GzipWithCheck(
f2,
self._unpacked_gzip_path,
self,
closeFileobj=True,
)
return None
self.file = BGLGzipFile(
fileobj=f2,
closeFileobj=True,
)
return None
def close(self) -> None:
BglReader.close(self)
if self.rawDumpFile:
self.rawDumpFile.close()
self.rawDumpFile = None
if self.msgLogFile:
self.msgLogFile.close()
self.msgLogFile = None
if self.samplesDumpFile:
self.samplesDumpFile.close()
self.samplesDumpFile = None
def __del__(self) -> None:
BglReader.__del__(self)
def readEntryWord(self, block, pos):
succeed, pos, _u_word, b_word = BglReader.readEntryWord(self, block, pos)
if not succeed:
return
self.rawDumpFileWriteText(f"\n\nblock type = {block.type}\nkey = ")
self.rawDumpFileWriteData(b_word)
def readEntryDefi(self, block, pos, b_key):
succeed, pos, _u_defi, b_defi = BglReader.readEntryDefi(self, block, pos, b_key)
if not succeed:
return
self.rawDumpFileWriteText("\ndefi = ")
self.rawDumpFileWriteData(b_defi)
"""
def readEntryAlts(self, block, pos, b_key, key):
succeed, pos, alts, b_alts = \
BglReader.readEntryAlts(self, block, pos, b_key, key)
if not succeed:
return
for b_alt in b_alts:
self.rawDumpFileWriteText("\nalt = ")
self.rawDumpFileWriteData(b_alt)
"""
def charReferencesStat(self, b_text, encoding):
"""b_text is bytes instance."""
# examples of numeric character references matched below:
# &#8220; (“), &#267; (ċ)
if not self.metadata2:
return
if encoding not in self.metadata2.charRefs:
self.metadata2.charRefs[encoding] = [0] * 257
charRefs = self.metadata2.charRefs[encoding]
for index, b_part in enumerate(self.charRefStatPattern.split(b_text)):
if index % 2 != 1:
continue
try:
code = (
int(b_part[3:-1], 16)
if b_part[:3].lower() == b"&#x"  # b_part is bytes, so compare with bytes
else int(b_part[2:-1])
)
except (ValueError, OverflowError):
continue
if code <= 0:
continue
code = min(code, 256)
charRefs[code] += 1
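# Illustrative example (hypothetical input): with encoding "cp1252", a
# definition containing b"&#233;" increments
# self.metadata2.charRefs["cp1252"][233]; references above 256
# (e.g. b"&#8220;") all land in the overflow bucket at index 256.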
# write text to dump file as is
def rawDumpFileWriteText(self, text): # FIXME
text = toStr(text)
if self.rawDumpFile:
self.rawDumpFile.write(text)
# write data to dump file unambiguously representing control chars
# escape "\" with "\\"
# print control chars as "\xhh"
def rawDumpFileWriteData(self, text):
text = toStr(text)
# the next function escapes too many chars, for example, it escapes äöü
# self.rawDumpFile.write(text.encode("unicode_escape"))
if self.rawDumpFile:
self.rawDumpFile.write(text)
def msgLogFileWrite(self, text):
text = toStr(text)
if self.msgLogFile:
offset = self.msgLogFile.tell()
# print offset in the log file to facilitate navigating this
# log in hex editor
# intended usage:
# the log file is opened in a text editor and hex editor
# use text editor to read error messages, use hex editor to
# inspect char codes offsets allows to quickly jump to the right
# place of the file hex editor
self.msgLogFile.write(f"\noffset = {offset:#02x}\n")
self.msgLogFile.write(text + "\n")
else:
log.debug(text)
def samplesDumpFileWrite(self, text):
text = toStr(text)
if self.samplesDumpFile:
offset = self.samplesDumpFile.tell()
self.samplesDumpFile.write(f"\noffset = {offset:#02x}\n")
self.samplesDumpFile.write(text + "\n")
else:
log.debug(text)
def dumpBlocks(self, dumpPath):
import pickle
self.file.seek(0)
metaData = MetaData()
metaData.numFiles = 0
metaData.gzipStartOffset = self.gzipOffset
self.numEntries = 0
self.numBlocks = 0
range_type = None
range_count = 0
block = Block()
while not self.isEndOfDictData():
log.debug(
f"readBlock: offset {self.file.tell():#02x}, "
f"unpacked offset {self.file.unpackedFile.tell():#02x}",
)
if not self.readBlock(block):
break
self.numBlocks += 1
if block.type in {1, 7, 10, 11, 13}:
self.numEntries += 1
elif block.type == 2: # Embedded File (mostly Image or HTML)
metaData.numFiles += 1
if block.type in {1, 2, 7, 10, 11, 13}:
if range_type == block.type:
range_count += 1
else:
if range_count > 0:
mblock = MetaDataRange(range_type, range_count)
metaData.blocks.append(mblock)
range_count = 0
range_type = block.type
range_count = 1
else:
if range_count > 0:
mblock = MetaDataRange(range_type, range_count)
metaData.blocks.append(mblock)
range_count = 0
mblock = MetaDataBlock(block.data, block.type)
metaData.blocks.append(mblock)
if range_count > 0:
mblock = MetaDataRange(range_type, range_count)
metaData.blocks.append(mblock)
range_count = 0
metaData.numEntries = self.numEntries
metaData.numBlocks = self.numBlocks
metaData.gzipEndOffset = self.file_bgl.tell()
metaData.fileSize = os.path.getsize(self._filename)
with open(self._filename, "rb") as f:
metaData.bglHeader = f.read(self.gzipOffset)
with open(dumpPath, "wb") as f:
pickle.dump(metaData, f)
self.file.seek(0)
def dumpMetadata2(self, dumpPath):
import pickle
if not self.metadata2:
return
with open(dumpPath, "wb") as f:
pickle.dump(self.metadata2, f)
def processDefiStat(self, fields, defi, b_key): # noqa: PLR0912
BglReader.processDefiStat(self, fields, defi, b_key)
if fields.b_title:
self.rawDumpFileWriteText("\ndefi title: ")
self.rawDumpFileWriteData(fields.b_title)
if fields.b_title_trans:
self.rawDumpFileWriteText("\ndefi title trans: ")
self.rawDumpFileWriteData(fields.b_title_trans)
if fields.b_transcription_50:
self.rawDumpFileWriteText(
f"\ndefi transcription_50 ({fields.code_transcription_50:#x}): ",
)
self.rawDumpFileWriteData(fields.b_transcription_50)
if fields.b_transcription_60:
self.rawDumpFileWriteText(
f"\ndefi transcription_60 ({fields.code_transcription_60:#x}): ",
)
self.rawDumpFileWriteData(fields.b_transcription_60)
if fields.b_field_1a:
self.rawDumpFileWriteText("\ndefi field_1a: ")
self.rawDumpFileWriteData(fields.b_field_1a)
if fields.b_field_13:
self.rawDumpFileWriteText(
f"\ndefi field_13 bytes: {fields.b_field_13!r}",
)
if fields.b_field_07:
self.rawDumpFileWriteText("\ndefi field_07: ")
self.rawDumpFileWriteData(fields.b_field_07)
if fields.b_field_06:
self.rawDumpFileWriteText(
f"\ndefi field_06: {fields.b_field_06}",
)
if fields.singleEncoding:
self.findAndPrintCharSamples(
fields.b_defi,
f"defi, key = {b_key}",
fields.encoding,
)
if self.metadata2:
self.metadata2.defiProcessedCount += 1
if isASCII(toStr(fields.b_defi)):
self.metadata2.defiAsciiCount += 1
try:
fields.b_defi.decode("utf-8")
except UnicodeError:
pass
else:
self.metadata2.defiUtf8Count += 1
if self.metadata2 and self.metadata2.isDefiASCII and not isASCII(fields.u_defi):
self.metadata2.isDefiASCII = False
# search for new chars in data
# if new chars are found, mark them with a special sequence in the text
# and print result into msg log
def findAndPrintCharSamples(self, b_data: bytes, hint, encoding):
if not self.targetCharsArray:
return
offsets = self.findCharSamples(b_data)
if len(offsets) == 0:
return
res = ""
utf8 = encoding.lower() == "utf-8"
i = 0
for o in offsets:
j = o
if utf8:
while b_data[j] & 0xC0 == 0x80:
j -= 1
res += toStr(b_data[i:j])  # decode bytes so they concatenate with the str marker
res += "!!!--+!!!"
i = j
res += toStr(b_data[j:])
offsets_str = " ".join([str(el) for el in offsets])
self.samplesDumpFileWrite(
f"charSample({hint})\noffsets = {offsets_str}"
f"\nmarked = {res}\norig = {b_data}\n",
)
def findCharSamples(self, b_data):
"""
Find samples of chars in b_data.
Search for chars in data that have not been marked so far in
the targetCharsArray array, mark new chars.
Returns a list of offsets in b_data
May return an empty list.
"""
res = []
if not isinstance(b_data, bytes):
log.error("findCharSamples: b_data is not a bytes instance")
return res
if not self.targetCharsArray:
log.error(
f"findCharSamples: self.targetCharsArray={self.targetCharsArray}",
)
return res
for i, char in enumerate(b_data):
if char < 128:
continue
if not self.targetCharsArray[char]:
self.targetCharsArray[char] = True
res.append(i)
return res
| 14,600 | Python | .py | 473 | 27.298097 | 82 | 0.701522 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,883 | bgl_internal_test.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_internal_test.py |
import unittest
from pyglossary.plugins.babylon_bgl.bgl_reader_debug import isASCII
class BglInternalTest(unittest.TestCase):
def test_isASCII(self):
f = isASCII
self.assertEqual(f(""), True)
self.assertEqual(f("abc"), True)
self.assertEqual(f("xyz"), True)
self.assertEqual(f("ABC"), True)
self.assertEqual(f("XYZ"), True)
self.assertEqual(f("1234567890"), True)
self.assertEqual(f("\n\r\t"), True)
self.assertEqual(f("\x80"), False)
self.assertEqual(f("abc\x80"), False)
self.assertEqual(f("abc\xff"), False)
| 537 | Python | .py | 15 | 33.066667 | 67 | 0.726397 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,884 | __init__.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/__init__.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
#
# Copyright © 2008-2021 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from .bgl_reader import BglReader as Reader
from .bgl_reader import optionsProp
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "babylon_bgl"
format = "BabylonBgl"
description = "Babylon (.BGL)"
extensions = (".bgl",)
extensionCreate = ""
singleFile = True
kind = "binary"
wiki = ""
website = None
# progressbar = DEFAULT_YES
# FIXME: document type of read/write options
# (that would be specified in command line)
| 1,367 | Python | .py | 47 | 27.744681 | 78 | 0.74924 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,885 | bgl_info.py | ilius_pyglossary/pyglossary/plugins/babylon_bgl/bgl_info.py |
# -*- coding: utf-8 -*-
# mypy: ignore-errors
#
# Copyright © 2008-2021 Saeed Rasooli <saeed.gnu@gmail.com> (ilius)
# Copyright © 2011-2012 kubtek <kubtek@gmail.com>
# This file is part of PyGlossary project, http://github.com/ilius/pyglossary
# Thanks to Raul Fernandes <rgfbr@yahoo.com.br> and Karl Grill for reverse
# engineering as part of https://sourceforge.net/projects/ktranslator/
#
# This program is a free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. Or on Debian systems, from /usr/share/common-licenses/GPL
# If not, see <http://www.gnu.org/licenses/gpl.txt>.
from __future__ import annotations
from collections.abc import Callable
from typing import Any
from pyglossary import gregorian
from pyglossary.core import log
from pyglossary.text_utils import (
uintFromBytes,
)
from .bgl_charset import charsetByCode
from .bgl_language import BabylonLanguage, languageByCode
__all__ = ["charsetInfoDecode", "infoType3ByCode"]
class InfoItem:
__slots__ = (
"attr",
"decode",
"name",
)
def __init__(
self,
name: str,
decode: "Callable[[bytes], Any] | None" = None,
attr: bool = False,
) -> None:
self.name = name
self.decode = decode
self.attr = attr
def decodeBglBinTime(b_value: bytes) -> str:
jd1970 = gregorian.to_jd(1970, 1, 1)
djd, hm = divmod(uintFromBytes(b_value), 24 * 60)
year, month, day = gregorian.jd_to(djd + jd1970)
hour, minute = divmod(hm, 60)
return f"{year:04d}/{month:02d}/{day:02d}, {hour:02d}:{minute:02d}"
def languageInfoDecode(b_value: bytes) -> BabylonLanguage | None:
"""Returns BabylonLanguage instance."""
intValue = uintFromBytes(b_value)
try:
return languageByCode[intValue]
except KeyError:  # languageByCode is a dict keyed by int code
log.warning(f"read_type_3: unknown language code = {intValue}")
return None
def charsetInfoDecode(b_value: bytes) -> str | None:
value = b_value[0]
try:
return charsetByCode[value]
except KeyError:
log.warning(f"read_type_3: unknown charset {value!r}")
return None
def aboutInfoDecode(b_value: bytes) -> dict[str, Any] | None:
if not b_value:
return None
b_aboutExt, _, aboutContents = b_value.partition(b"\x00")
if not b_aboutExt:
log.warning("read_type_3: about: no file extension")
return None
try:
aboutExt = b_aboutExt.decode("ascii")
except UnicodeDecodeError as e:
log.error(f"{b_aboutExt=}: {e}")
aboutExt = ""
return {
"about_extension": aboutExt,
"about": aboutContents,
}
def utf16InfoDecode(b_value: bytes) -> str | None:
r"""
Decode info values from UTF-16.
Return str, or None (on errors).
block type = 3
block format: <2 byte code1><2 byte code2>
if code2 == 0: then the block ends
if code2 == 1: then the block continues as follows:
<4 byte len1> \x00 \x00 <message in utf-16>
len1 - length of message in 2-byte chars
"""
if b_value[0] != 0:
log.warning(
f"utf16InfoDecode: b_value={b_value}, null expected at 0",
)
return None
if b_value[1] == 0:
if len(b_value) > 2:
log.warning(
f"utf16InfoDecode: unexpected b_value size: {len(b_value)}",
)
return None
if b_value[1] > 1:
log.warning(
f"utf16InfoDecode: b_value={b_value!r}, unexpected byte at 1",
)
return None
# now b_value[1] == 1
size = 2 * uintFromBytes(b_value[2:6])
if tuple(b_value[6:8]) != (0, 0):
log.warning(
f"utf16InfoDecode: b_value={b_value!r}, null expected at 6:8",
)
if size != len(b_value) - 8:
log.warning(
f"utf16InfoDecode: b_value={b_value!r}, size does not match",
)
return b_value[8:].decode("utf16") # str
def flagsInfoDecode(b_value: bytes) -> dict[str, bool]:
"""
Returns a dict with these keys:
utf8Encoding
when this flag is set utf8 encoding is used for all articles
when false, the encoding is set according to the source and
target alphabet
bgl_spellingAlternatives
determines whether the glossary offers spelling alternatives
for searched terms
bgl_caseSensitive
defines if the search for terms in this glossary is
case sensitive
see code 0x20 as well.
"""
flags = uintFromBytes(b_value)
return {
"utf8Encoding": (flags & 0x8000 != 0),
"bgl_spellingAlternatives": (flags & 0x10000 == 0),
"bgl_caseSensitive": (flags & 0x1000 != 0),
}
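# Illustrative example: a flags value of 0x11000 yields
# {"utf8Encoding": False, "bgl_spellingAlternatives": False,
#  "bgl_caseSensitive": True}.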
infoType3ByCode = {
# glossary name
0x01: InfoItem("title"),
# glossary author name, a list of "|"-separated values
0x02: InfoItem("author"),
# glossary author e-mail
0x03: InfoItem("email"),
0x04: InfoItem("copyright"),
0x07: InfoItem(
"sourceLang",
decode=languageInfoDecode,
attr=True,
),
0x08: InfoItem(
"targetLang",
decode=languageInfoDecode,
attr=True,
),
0x09: InfoItem("description"),
# 0: browsing disabled, 1: browsing enabled
0x0A: InfoItem(
"bgl_browsingEnabled",
decode=lambda b_value: (b_value[0] != 0),
),
0x0B: InfoItem("icon1.ico"),
0x0C: InfoItem(
"bgl_numEntries",
decode=uintFromBytes,
attr=True,
),
# the value is a dict
0x11: InfoItem("flags", decode=flagsInfoDecode),
0x14: InfoItem("creationTime", decode=decodeBglBinTime),
0x1A: InfoItem(
"sourceCharset",
decode=charsetInfoDecode,
attr=True,
),
0x1B: InfoItem(
"targetCharset",
decode=charsetInfoDecode,
attr=True,
),
0x1C: InfoItem(
"bgl_firstUpdated",
decode=decodeBglBinTime,
),
# bgl_firstUpdated was previously called middleUpdated
# in rare cases, bgl_firstUpdated is before creationTime
# but usually it looks like to be the first update (after creation)
# in some cases, it's the same as lastUpdated
# in some cases, it's minutes after creationTime
# bgl_firstUpdated exists in more glossaries than lastUpdated
# so if lastUpdated is not there, we use bgl_firstUpdated as lastUpdated
0x20: InfoItem(
"bgl_caseSensitive2",
decode=lambda b_value: (b_value[0] == 0x31),
# 0x30 - case sensitive search is disabled
# 0x31 - case sensitive search is enabled
),
0x24: InfoItem("icon2.ico"),
0x2C: InfoItem(
"bgl_purchaseLicenseMsg",
decode=utf16InfoDecode,
),
0x2D: InfoItem(
"bgl_licenseExpiredMsg",
decode=utf16InfoDecode,
),
0x2E: InfoItem("bgl_purchaseAddress"),
0x30: InfoItem(
"bgl_titleWide",
decode=utf16InfoDecode,
),
# a list of "|"-separated values
0x31: InfoItem(
"bgl_authorWide",
decode=utf16InfoDecode,
),
0x33: InfoItem(
"lastUpdated",
decode=decodeBglBinTime,
),
0x3B: InfoItem("bgl_contractions"),
# contains a value like "Arial Unicode MS" or "Tahoma"
0x3D: InfoItem("bgl_fontName"),
# value would be dict
0x41: InfoItem(
"bgl_about",
decode=aboutInfoDecode,
),
# the length of the substring match in a term
0x43: InfoItem(
"bgl_length",
decode=uintFromBytes,
),
}
| 7,052 | Python | .py | 241 | 26.929461 | 78 | 0.731711 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,886 | summarize.py | ilius_pyglossary/pyglossary/plugins/edict2/summarize.py |
from __future__ import annotations
import re
import string
__all__ = ["summarize"]
parenthetical = re.compile(r"\([^)]+?\)")
punct_table = {ord(p): " " for p in string.punctuation if p not in "-'"}
stops = {
"i",
"me",
"my",
"myself",
"we",
"our",
"ours",
"ourselves",
"you",
"your",
"yours",
"yourself",
"yourselves",
"he",
"him",
"his",
"himself",
"she",
"her",
"hers",
"herself",
"it",
"its",
"itself",
"they",
"them",
"their",
"theirs",
"themselves",
"what",
"which",
"who",
"whom",
"this",
"that",
"these",
"those",
"am",
"is",
"are",
"was",
"were",
"be",
"been",
"being",
"have",
"has",
"had",
"having",
"do",
"does",
"did",
"doing",
"a",
"an",
"the",
"and",
"but",
"if",
"or",
"because",
"as",
"until",
"while",
"of",
"at",
"by",
"for",
"with",
"about",
"against",
"between",
"into",
"through",
"during",
"before",
"after",
"above",
"below",
"to",
"from",
"up",
"down",
"in",
"out",
"on",
"off",
"over",
"under",
"again",
"further",
"then",
"once",
"here",
"there",
"when",
"where",
"why",
"how",
"all",
"any",
"both",
"each",
"few",
"more",
"most",
"other",
"some",
"such",
"no",
"nor",
"not",
"only",
"own",
"same",
"so",
"than",
"too",
"very",
"s",
"t",
"can",
"will",
"just",
"don",
"should",
"now",
"d",
"ll",
"m",
"o",
"re",
"ve",
"y",
"ain",
"aren",
"couldn",
"didn",
"doesn",
"hadn",
"hasn",
"haven",
"isn",
"ma",
"mightn",
"mustn",
"needn",
"shan",
"shouldn",
"wasn",
"weren",
"won",
"wouldn",
}
def summarize(phrase: str) -> str:
phrase = parenthetical.sub("", phrase)
phrase = phrase.translate(punct_table)
words = phrase.split()
relevant_words = [word for word in words if word not in stops]
if not relevant_words:
relevant_words = words
return " ".join(relevant_words[:10])
| 1,860 | Python | .py | 169 | 9.023669 | 72 | 0.551008 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,887 | conv.py | ilius_pyglossary/pyglossary/plugins/edict2/conv.py |
from __future__ import annotations
import re
from collections.abc import Callable
from io import BytesIO
from typing import TYPE_CHECKING, NamedTuple, cast
from lxml import etree as ET
from pyglossary.core import log
from .pinyin import convert
from .summarize import summarize
if TYPE_CHECKING:
from collections.abc import Sequence
from pyglossary.lxml_types import T_htmlfile
line_reg = re.compile(r"^([^ ]+) ([^ ]+) \[([^\]]+)\] /(.+)/$")
COLORS = {
"": "black",
"1": "red",
"2": "orange",
"3": "green",
"4": "blue",
"5": "black",
}
def parse_line_trad(line: str) -> tuple[str, str, str, list[str]] | None:
line = line.strip()
match = line_reg.match(line)
if match is None:
return None
trad, simp, pinyin, eng = match.groups()
pinyin = pinyin.replace("u:", "v")
return trad, simp, pinyin, eng.split("/")
def parse_line_simp(line: str) -> tuple[str, str, str, list[str]] | None:
line = line.strip()
match = line_reg.match(line)
if match is None:
return None
trad, simp, pinyin, eng = match.groups()
pinyin = pinyin.replace("u:", "v")
return simp, trad, pinyin, eng.split("/")
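# Illustrative example (a typical CC-CEDICT line, hypothetical input):
#     parse_line_trad("中國 中国 [Zhong1 guo2] /China/")
#     -> ("中國", "中国", "Zhong1 guo2", ["China"])
# parse_line_simp returns the same fields with the simplified form first.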
class Article(NamedTuple):
first: str
second: str
pinyin: str
eng: list[str]
def names(self) -> list[str]:
return [self.first, self.second, self.pinyin] + list(map(summarize, self.eng))
def render_syllables_no_color(
hf: "T_htmlfile",
syllables: Sequence[str],
_tones: Sequence[str],
) -> None:
with hf.element("div", style="display: inline-block"):
for syllable in syllables:
with hf.element("font", color=""):
hf.write(syllable)
def render_syllables_color(
hf: "T_htmlfile",
syllables: Sequence[str],
tones: Sequence[str],
) -> None:
if len(syllables) != len(tones):
log.warning(f"unmatched tones: {syllables=}, {tones=}")
render_syllables_no_color(hf, syllables, tones)
return
with hf.element("div", style="display: inline-block"):
for index, syllable in enumerate(syllables):
with hf.element("font", color=COLORS[tones[index]]):
hf.write(syllable)
# @lru_cache(maxsize=128)
def convert_pinyin(pinyin: str) -> tuple[Sequence[str], Sequence[str]]:
return tuple(zip(*map(convert, pinyin.split()), strict=False))
def render_article(
render_syllables: Callable,
article: Article,
) -> tuple[list[str], str]:
names = article.names()
# pinyin_tones = [convert(syl) for syl in pinyin.split()]
pinyin_list, tones = convert_pinyin(article.pinyin)
f = BytesIO()
with ET.htmlfile(f, encoding="utf-8") as _hf: # noqa: PLR1702
hf = cast("T_htmlfile", _hf)
with hf.element("div", style="border: 1px solid; padding: 5px"):
with hf.element("div"):
with hf.element("big"):
render_syllables(hf, names[0], tones)
if names[1] != names[0]:
hf.write("\xa0/\xa0") # "\xa0" --> " " == " "
render_syllables(hf, names[1], tones)
hf.write(ET.Element("br"))
with hf.element("big"):
render_syllables(hf, pinyin_list, tones)
with hf.element("div"):
with hf.element("ul"):
for defn in article.eng:
with hf.element("li"):
hf.write(defn)
return names, f.getvalue().decode("utf-8")
| 3,114 | Python | .py | 95 | 29.957895 | 80 | 0.682959 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,888 | __init__.py | ilius_pyglossary/pyglossary/plugins/edict2/__init__.py |
from __future__ import annotations
from collections.abc import Iterator
from typing import TYPE_CHECKING
from pyglossary.core import log
from pyglossary.io_utils import nullTextIO
from pyglossary.option import (
BoolOption,
EncodingOption,
Option,
)
from . import conv
if TYPE_CHECKING:
import io
from pyglossary.glossary_types import EntryType, GlossaryType
__all__ = [
"Reader",
"description",
"enable",
"extensionCreate",
"extensions",
"format",
"kind",
"lname",
"optionsProp",
"singleFile",
"website",
"wiki",
]
enable = True
lname = "edict2"
format = "EDICT2"
description = "EDICT2 (CEDICT) (.u8)"
extensions = (".u8",)
extensionCreate = ""
singleFile = True
kind = "text"
wiki = "https://en.wikipedia.org/wiki/CEDICT"
website = None
# Websites / info for different uses of format:
# CC-CEDICT: Chinese-English (122k entries)
# "https://cc-cedict.org/editor/editor.php", "CC-CEDICT Editor"
# HanDeDict: Chinese-German (144k entries)
# "https://handedict.zydeo.net/de/download",
# "Herunterladen - HanDeDict @ Zydeo Wörterbuch Chinesisch-Deutsch"
# CFDICT: Chinese-French (56k entries)
# "https://chine.in/mandarin/dictionnaire/CFDICT/",
# "Dictionnaire chinois français _ 汉法词典 — Chine Informations"
# CC-Canto is Pleco Software's addition of Cantonese language readings
# in Jyutping transcription to CC-CEDICT
# "https://cantonese.org/download.html",
optionsProp: "dict[str, Option]" = {
"encoding": EncodingOption(),
"traditional_title": BoolOption(
comment="Use traditional Chinese for entry titles/keys",
),
"colorize_tones": BoolOption(
comment="Set to false to disable tones coloring",
),
}
class Reader:
depends = {
"lxml": "lxml",
}
_encoding: str = "utf-8"
_traditional_title: bool = False
_colorize_tones: bool = True
def __init__(self, glos: GlossaryType) -> None:
self._glos = glos
self.file: "io.TextIOBase" = nullTextIO
self._fileSize = 0
def open(self, filename: str) -> None:
# self._glos.sourceLangName = "Chinese"
# self._glos.targetLangName = "English"
cfile = self.file = open(filename, encoding=self._encoding)
if cfile.seekable():
cfile.seek(0, 2)
self._fileSize = cfile.tell()
cfile.seek(0)
# self._glos.setInfo("input_file_size", f"{self._fileSize}")
else:
log.warning("EDICT2 Reader: file is not seekable")
def close(self) -> None:
self.file.close()
self.file = nullTextIO
def __len__(self) -> int:
return 0
def __iter__(self) -> Iterator[EntryType]:
_file = self.file
fileSize = self._fileSize
glos = self._glos
render_syllables = (
conv.render_syllables_color
if self._colorize_tones
else conv.render_syllables_no_color
)
parse_line = (
conv.parse_line_trad if self._traditional_title else conv.parse_line_simp
)
while True:
line = _file.readline()
if not line:
break
line = line.rstrip("\n")
if not line:
continue
if line.startswith("#"):
continue
parts = parse_line(line)
if parts is None:
log.warning(f"bad line: {line!r}")
continue
names, article_text = conv.render_article(
render_syllables,
conv.Article(*parts),
)
entry = glos.newEntry(
names,
article_text,
defiFormat="h",
byteProgress=(_file.tell(), fileSize) if fileSize else None,
)
yield entry
| 3,309 | Python | .py | 122 | 24.311475 | 76 | 0.715783 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,889 | pinyin.py | ilius_pyglossary/pyglossary/plugins/edict2/pinyin.py |
# coding=utf-8
# based on https://github.com/zkoch/CEDICT_Parser
from __future__ import annotations
__all__ = ["convert"]
TONES = {
"a1": "ā",
"a2": "á",
"a3": "ǎ",
"a4": "à",
"e1": "ē",
"e2": "é",
"e3": "ě",
"e4": "è",
"i1": "ī",
"i2": "í",
"i3": "ǐ",
"i4": "ì",
"o1": "ō",
"o2": "ó",
"o3": "ǒ",
"o4": "ò",
"u1": "ū",
"u2": "ú",
"u3": "ǔ",
"u4": "ù",
"v1": "ǖ",
"v2": "ǘ",
"v3": "ǚ",
"v4": "ǜ",
}
# using v for the umlauted u
VOWELS = ("a", "e", "o", "iu", "ui", "i", "u", "v")
def convert(word: str) -> tuple[str, str]:
tone = word[-1]
pinyin = word[:-1].lower()
if tone == "5":
return pinyin, tone
if tone not in {"1", "2", "3", "4"}:
return word, ""
for vowel in VOWELS:
if vowel in pinyin:
vowel1 = vowel[-1]
return pinyin.replace(vowel1, TONES[vowel1 + tone]), tone
return pinyin, tone
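# Illustrative examples:
#     convert("zhong1") -> ("zhōng", "1")
#     convert("lv4") -> ("lǜ", "4")
#     convert("ma5") -> ("ma", "5")  # neutral tone, no diacritic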
| 867 | Python | .py | 44 | 17 | 60 | 0.510127 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,890 | plugin-index.py | ilius_pyglossary/scripts/plugin-index.py |
#!/usr/bin/python3
import json
import sys
from collections import OrderedDict as odict
from os.path import abspath, dirname, join
from pathlib import Path
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.core import userPluginsDir
from pyglossary.flags import DEFAULT_NO
from pyglossary.glossary import Glossary
Glossary.init(
usePluginsJson=False,
skipDisabledPlugins=False,
)
userPluginsDirPath = Path(userPluginsDir)
plugins = [
p for p in Glossary.plugins.values() if userPluginsDirPath not in p.path.parents
]
data = []
for p in plugins:
canRead = p.canRead
canWrite = p.canWrite
item = odict(
[
("module", p.module.__name__),
("lname", p.lname),
("name", p.name),
("description", p.description),
("extensions", p.extensions),
("singleFile", p.singleFile),
(
"optionsProp",
{name: opt.toDict() for name, opt in p.optionsProp.items()},
),
("canRead", canRead),
("canWrite", canWrite),
],
)
if p.sortOnWrite != DEFAULT_NO:
item["sortOnWrite"] = p.sortOnWrite
if p.sortKeyName:
item["sortKeyName"] = p.sortKeyName
if canRead:
item["readOptions"] = p.getReadOptions()
if canWrite:
item["writeOptions"] = p.getWriteOptions()
if not p.enable:
item["enable"] = False
if p.readDepends:
item["readDepends"] = p.readDepends
if p.writeDepends:
item["writeDepends"] = p.writeDepends
if p.readCompressions:
item["readCompressions"] = p.readCompressions
data.append(item)
jsonText = json.dumps(
data,
sort_keys=False,
indent="\t",
ensure_ascii=True,
)
with open(
join(rootDir, "plugins-meta", "index.json"),
mode="w",
encoding="utf-8",
newline="\n",
) as _file:
_file.write(jsonText)
| 1,708 | Python | .py | 69 | 22.42029 | 81 | 0.730061 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,891 | entry-filters-doc.py | ilius_pyglossary/scripts/entry-filters-doc.py |
#!/usr/bin/python3
import sys
from os.path import abspath, dirname, join
from mako.template import Template
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.entry_filters import entryFiltersRules
from pyglossary.ui.base import UIBase
ui = UIBase()
ui.loadConfig(user=False)
template = Template(
"""${entryFiltersTable}
""",
)
def codeValue(x):
s = str(x)
if s:
return "`" + s + "`"
return ""
def yesNo(x):
if x is True:
return "Yes"
if x is False:
return "No"
return ""
def renderCell(value):
return str(value).replace("\n", "\\n").replace("\t", "\\t")
def renderTable(rows):
"""rows[0] must be headers."""
rows = [[renderCell(cell) for cell in row] for row in rows]
width = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
rows = [
[cell.ljust(width[i], " ") for i, cell in enumerate(row)]
for rowI, row in enumerate(rows)
]
rows.insert(1, ["-" * colWidth for colWidth in width])
return "\n".join(["| " + " | ".join(row) + " |" for row in rows])
def getCommandFlagsMD(name):
if name is None:
return ""
opt = ui.configDefDict[name]
flag = name.replace("_", "-")
if opt.falseComment:
return f"`--{flag}`<br/>`--no-{flag}`"
return f"`--{flag}`"
for configParam, default, filterClass in entryFiltersRules:
if configParam is None:
continue
assert ui.config[configParam] == default
assert filterClass.name == configParam
entryFiltersTable = "## Entry Filters\n\n" + renderTable(
[
(
"Name",
"Default Enabled",
"Command Flags",
"Description",
),
]
+ [
(
codeValue(filterClass.name),
yesNo(bool(default)),
getCommandFlagsMD(configParam),
filterClass.desc,
)
for configParam, default, filterClass in entryFiltersRules
],
)
text = template.render(
entryFiltersTable=entryFiltersTable,
)
with open(
join(rootDir, "doc", "entry-filters.md"),
mode="w",
encoding="utf-8",
) as _file:
_file.write(text)
| 1,950 | Python | .py | 78 | 22.653846 | 72 | 0.693889 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,892 | config-doc.py | ilius_pyglossary/scripts/config-doc.py |
#!/usr/bin/env python
import json
import re
import sys
from os.path import abspath, dirname, join
from mako.template import Template
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.ui.base import UIBase
ui = UIBase()
ui.loadConfig(user=False)
# ui.configDefDict
re_flag = re.compile("(\\s)(--[a-z\\-]+)")
template = Template(
"""${paramsTable}
${"Configuration Files"}
${"-------------------"}
The default configuration values are stored in `config.json <./../config.json/>`_
file in source/installation directory.
The user configuration file - if exists - will override default configuration
values. The location of this file depends on the operating system:
- Linux or BSD: ``~/.pyglossary/config.json``
- Mac: ``~/Library/Preferences/PyGlossary/config.json``
- Windows: ``C:\\Users\\USERNAME\\AppData\\Roaming\\PyGlossary\\config.json``
${"Using as library"}
${"----------------"}
When you use PyGlossary as a library, neither of ``config.json`` files are
loaded. So if you want to change the config, you should set ``glos.config``
property (which you can do only once for each instance of ``Glossary``).
For example:
.. code:: python
glos = Glossary()
glos.config = {
"lower": True,
}
""",
)
with open(join(rootDir, "scripts/term-colors.json"), encoding="utf-8") as _file:
termColors = json.load(_file)
def codeValue(x):
s = str(x)
if s:
return "``" + s + "``"
return ""
def tableRowSep(width, c="-"):
return "+" + c + f"{c}+{c}".join([c * w for w in width]) + c + "+"
def renderTable(rows):
"""rows[0] must be headers."""
colN = len(rows[0])
width = [
max(max(len(line) for line in row[i].split("\n")) for row in rows)
for i in range(colN)
]
rowSep = tableRowSep(width, "-")
headerSep = tableRowSep(width, "=")
lines = [rowSep]
for rowI, row in enumerate(rows):
newRows = []
for colI, cell in enumerate(row):
for lineI, line in enumerate(cell.split("\n")):
if lineI >= len(newRows):
newRows.append([" " * width[colI] for colI in range(colN)])
newRows[lineI][colI] = line.ljust(width[colI], " ")
lines += ["| " + " | ".join(row) + " |" for row in newRows]
if rowI == 0:
lines.append(headerSep)
else:
lines.append(rowSep)
# widthsStr = ", ".join([str(w) for w in width])
# header = f".. table:: my table\n\t:widths: {widthsStr}\n\n"
# return header + "\n".join(["\t" + line for line in lines])
return "\n".join(lines)
def getCommandFlagsMD(name, opt):
if name.startswith("color.enable.cmd."):
return "``--no-color``"
if not opt.hasFlag:
return ""
flag = opt.customFlag
if not flag:
flag = name.replace("_", "-")
if opt.falseComment:
return f"| ``--{flag}``\n| ``--no-{flag}``"
# return f"- ``--{flag}``\n- ``--no-{flag}``"
return f"``--{flag}``"
def optionComment(name, opt):
comment = opt.comment
comment = re_flag.sub("\\1``\\2``", comment)
if name.startswith("color.cmd."):
comment = f"| {comment}\n| See `term-colors.md <./term-colors.md/>`_"
return comment # noqa: RET504
def jsonCodeValue(value):
# if isinstance(value, str):
# return codeValue(value)
return codeValue(json.dumps(value))
def defaultOptionValue(name, _opt, images):
value = ui.config[name]
valueMD = jsonCodeValue(value)
if name.startswith("color.cmd."):
_hex = termColors[str(value)].lstrip("#")
imageI = f"image{len(images)}"
images.append(
f".. |{imageI}| image:: https://via.placeholder.com/20/{_hex}/000000?text=+",
)
valueMD += f"\n|{imageI}|"
return valueMD
title = "Configuration Parameters"
title += "\n" + len(title) * "-" + "\n"
images = []
paramsTable = title + renderTable(
[
(
"Name",
"Command Flags",
"Type",
"Default",
"Comment",
),
]
+ [
(
codeValue(name),
getCommandFlagsMD(name, opt),
opt.typ,
defaultOptionValue(name, opt, images),
optionComment(name, opt),
)
for name, opt in ui.configDefDict.items()
if not opt.disabled
],
)
text = template.render(
codeValue=codeValue,
ui=ui,
paramsTable=paramsTable,
)
text += "\n"
for image in images:
text += "\n" + image
with open(join(rootDir, "doc", "config.rst"), mode="w", encoding="utf-8") as _file:
_file.write(text)
| 4,205 | Python | .py | 140 | 27.585714 | 83 | 0.663098 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,893 | tools-py2toml.py | ilius_pyglossary/scripts/tools-py2toml.py |
#!/usr/bin/python3
import sys
from collections import OrderedDict
from os.path import abspath, dirname, join
from pathlib import Path
import toml
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.core import userPluginsDir
from pyglossary.glossary import Glossary
Glossary.init(
# usePluginsJson=False,
)
userPluginsDirPath = Path(userPluginsDir)
plugins = [
p for p in Glossary.plugins.values() if userPluginsDirPath not in p.path.parents
]
toolsDir = join(rootDir, "plugins-meta", "tools")
for p in plugins:
module = p.module
optionsProp = p.optionsProp
tools = OrderedDict()
for tool in getattr(p.module, "tools", []):
tools[tool.pop("name")] = tool
# if not tools:
# continue
# pprint(tools)
with open(join(toolsDir, f"{p.lname}.toml"), mode="w", encoding="utf-8") as _file:
toml.dump(tools, _file)
| 870 | Python | .py | 29 | 28.103448 | 83 | 0.763571 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,894 | create-conf-dir.py | ilius_pyglossary/scripts/create-conf-dir.py |
#!/usr/bin/env python3
import os
import sys
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.core import confDir
os.makedirs(confDir, mode=0o755, exist_ok=True)
| 243 | Python | .py | 8 | 28.875 | 47 | 0.796537 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,895 | glos-find-bar-words.py | ilius_pyglossary/scripts/glos-find-bar-words.py |
#!/usr/bin/python3
import sys
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary import Glossary
def hasBar(entry):
return any("|" in word for word in entry.l_word)
Glossary.init(
# usePluginsJson=False,
)
for direct in (True, False):
print(f"\n-------- {direct=}")
glos = Glossary()
glos.config = {
"enable_alts": True,
}
glos.read(
filename=sys.argv[1],
direct=direct,
)
for entry in glos:
if hasBar(entry):
print(f"+++ {entry.l_word!r} -> {entry.defi[:60]}")
continue
# print(f"--- {entry.l_word!r} -> {entry.defi[:60]}")
| 636 | Python | .py | 26 | 22.115385 | 55 | 0.68 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,896 | wiki-formats.py | ilius_pyglossary/scripts/wiki-formats.py |
#!/usr/bin/env python3
import os
import sys
from os.path import join
from pathlib import Path
from mako.template import Template
rootDir = join(
os.getenv("HOME"),
"pyglossary",
)
sys.path.insert(0, rootDir)
from pyglossary.core import userPluginsDir
from pyglossary.glossary import Glossary
Glossary.init(
# usePluginsJson=False,
)
"""
Mako template engine:
https://docs.makotemplates.org/en/latest/
https://github.com/sqlalchemy/mako
https://pypi.org/project/Mako/
Package python3-mako in Debian repos
"""
hasIconSet = {
"aard2_slob",
"appledict_bin",
"appledict",
"babylon_bgl",
"cc_cedict",
"csv",
"dicformids",
"dict_cc",
"dict_cc_split",
"digitalnk",
"dsl",
"epub2",
"jmdict",
"kobo",
"lingoes_ldf",
"mobi",
"octopus_mdict",
"sql",
"stardict",
"tabfile",
"wiktionary_dump",
"zim",
}
def pluginIsActive(p):
if not p.enable:
return False
if not (p.canRead or p.canWrite):
return False
return userPluginsDirPath not in p.path.parents
def codeValue(x):
s = str(x)
if s:
return "`" + s + "`"
return ""
def yesNo(x):
if x is True:
return "Yes"
if x is False:
return "No"
return ""
def iconImg(p):
if p.lname not in hasIconSet:
return ""
return (
'<img src="https://raw.githubusercontent.com/wiki/'
f'ilius/pyglossary/icons/{p.lname}.png" height="32"/>'
)
def kindEmoji(p):
kind = p.module.kind
if not kind:
return ""
return {
"text": "�",
"binary": "🔢",
"directory": "�",
"package": "📦",
}[kind]
willNotSupportRead = {
"epub2",
"kobo",
"mobi",
# "html_dir",
"info",
"sql",
}
willNotSupportWrite = {
"appledict_bin",
"babylon_bgl",
"cc_cedict",
"cc_kedict",
"freedict",
"jmdict",
"octopus_mdict",
"wiktionary_dump",
"xdxf",
"wiktextract",
"jmnedict",
}
def readCheck(p):
if p.lname in willNotSupportRead:
return "�"
return "✔" if p.canRead else ""
def writeCheck(p):
if p.lname in willNotSupportWrite:
return "�"
return "✔" if p.canWrite else ""
template = Template(
"""
| | Description | | Read | Write| Doc Link |
|:-:| ----------- |:-:|:----:|:----:| -------- |
% for p in plugins:
| ${iconImg(p)} | ${p.description} | ${kindEmoji(p)} | ${readCheck(p)} | ${writeCheck(p)} | [${p.lname}.md](https://github.com/ilius/pyglossary/blob/master/doc/p/${p.lname}.md) |
% endfor
Legend:
- 📁 Directory
- 📝 Text file
- 📦 Package/archive file
- 🔢 Binary file
- ✔ Supported
- ❌ Will not be supported
""",
)
# wiki = module.wiki
# wiki_md = "―"
# if module.wiki:
# wiki_title = wiki.split("/")[-1].replace("_", " ")
# wiki_md = f"[{wiki_title}]({wiki})"
# website_md = "―"
# if module.website:
# website_md = module.website
userPluginsDirPath = Path(userPluginsDir)
plugins = [p for p in Glossary.plugins.values() if pluginIsActive(p)]
text = template.render(
plugins=plugins,
iconImg=iconImg,
kindEmoji=kindEmoji,
readCheck=readCheck,
writeCheck=writeCheck,
)
with open("Formats.md", mode="w", encoding="utf-8") as _file:
_file.write(text)
| 3,012 | Python | .py | 145 | 18.848276 | 178 | 0.67842 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,897 | plugin-doc.py | ilius_pyglossary/scripts/plugin-doc.py |
#!/usr/bin/python3
import sys
from collections import OrderedDict
from os.path import abspath, dirname, join
from pathlib import Path
import toml
from mako.template import Template
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.core import userPluginsDir
from pyglossary.glossary import Glossary
from pyglossary.sort_keys import defaultSortKeyName
Glossary.init(
# usePluginsJson=False,
)
"""
Mako template engine:
https://docs.makotemplates.org/en/latest/
https://github.com/sqlalchemy/mako
https://pypi.org/project/Mako/
Package python3-mako in Debian repos
"""
template = Template(
"""${"##"} ${description}
${topTables}
% if readDependsLinks and readDependsLinks == writeDependsLinks:
${"### Dependencies for reading and writing"}
PyPI Links: ${readDependsLinks}
To install, run:
```sh
${readDependsCmd}
```
% else:
% if readDependsLinks:
${"### Dependencies for reading"}
PyPI Links: ${readDependsLinks}
To install, run:
```sh
${readDependsCmd}
```
% endif
% if writeDependsLinks:
${"### Dependencies for writing"}
PyPI Links: ${writeDependsLinks}
To install, run
```sh
${writeDependsCmd}
```
% endif
% endif
% if extraDocs:
% for title, text in extraDocs:
${f"### {title}"}
${text.replace('(./doc/', '(../')}
% endfor
% endif
${toolsTable}
""",
)
def codeValue(x):
s = str(x)
if s:
return "`" + s + "`"
return ""
def yesNo(x):
if x is True:
return "Yes"
if x is False:
return "No"
return ""
def kindEmoji(kind):
if not kind:
return ""
return {
"text": "üìù",
"binary": "üî¢",
"directory": "üìÅ",
"package": "üì¶",
}[kind]
def renderLink(title, url):
if "(" in title or ")" in title:
url = f"<{url}>"
title = title.replace("|", "\\|")
return f"[{title}]({url})"
def pypiLink(pypiName):
urlPath = pypiName.replace("==", "/")
urlPath = urlPath.replace(">", "%3E")
return renderLink(
pypiName.replace("==", " "),
f"https://pypi.org/project/{urlPath}",
)
def makeDependsDoc(cls):
if not (cls and getattr(cls, "depends", None)):
return "", ""
links = ", ".join([pypiLink(pypiName) for pypiName in cls.depends.values()])
cmd = "pip3 install " + " ".join(
cls.depends.values(),
)
return links, cmd
def sortKeyName(p):
value = p.sortKeyName
if value:
return codeValue(value)
return "(" + codeValue(defaultSortKeyName) + ")"
def renderCell(value):
return str(value).replace("\n", "\\n").replace("\t", "\\t")
def renderTable(rows):
"""rows[0] must be headers."""
rows = [[renderCell(cell) for cell in row] for row in rows]
width = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
rows = [
[cell.ljust(width[i], " ") for i, cell in enumerate(row)]
for rowI, row in enumerate(rows)
]
rows.insert(1, ["-" * colWidth for colWidth in width])
return "\n".join(["| " + " | ".join(row) + " |" for row in rows])
def renderRWOptions(options):
return renderTable(
[("Name", "Default", "Type", "Comment")]
+ [
(
optName,
codeValue(default),
optionsType[optName],
optionsComment[optName],
)
for optName, default in options.items()
],
)
def pluginIsActive(p):
if not p.enable:
return False
if not (p.canRead or p.canWrite):
return False
return userPluginsDirPath not in p.path.parents
def getToolSourceLink(tool):
url = tool.get("source")
if not url:
return "―"
_, title = url.split("://")
if title.startswith("github.com/"):
title = "@" + title[len("github.com/") :]
return renderLink(title, url)
userPluginsDirPath = Path(userPluginsDir)
plugins = [p for p in Glossary.plugins.values() if pluginIsActive(p)]
toolsDir = join(rootDir, "plugins-meta", "tools")
for p in plugins:
module = p.module
optionsProp = p.optionsProp
wiki = module.wiki
wiki_md = "―"
if wiki:
if wiki.startswith("https://github.com/"):
wiki_title = "@" + wiki[len("https://github.com/") :]
else:
wiki_title = wiki.split("/")[-1].replace("_", " ")
wiki_md = renderLink(wiki_title, wiki)
website_md = "―"
website = module.website
if website:
if isinstance(website, str):
website_md = website
else:
try:
url, title = website
except ValueError:
raise ValueError(f"{website = }") from None
website_md = renderLink(title, url)
(
readDependsLinks,
readDependsCmd,
) = makeDependsDoc(getattr(module, "Reader", None))
(
writeDependsLinks,
writeDependsCmd,
) = makeDependsDoc(getattr(module, "Writer", None))
extraDocs = getattr(module, "extraDocs", [])
toolsFile = join(toolsDir, f"{p.lname}.toml")
try:
with open(toolsFile, encoding="utf-8") as _file:
tools_toml = toml.load(_file, _dict=OrderedDict)
except FileNotFoundError:
tools = []
except Exception as e:
print(f"\nFile: {toolsFile}")
raise e
else:
for toolName, tool in tools_toml.items():
tool.update({"name": toolName})
tools = tools_toml.values()
generalInfoTable = "### General Information\n\n" + renderTable(
[
("Attribute", "Value"),
("Name", p.name),
("snake_case_name", p.lname),
("Description", p.description),
("Extensions", ", ".join([codeValue(ext) for ext in p.extensions])),
("Read support", yesNo(p.canRead)),
("Write support", yesNo(p.canWrite)),
("Single-file", yesNo(p.singleFile)),
("Kind", f"{kindEmoji(module.kind)} {module.kind}"),
("Sort-on-write", p.sortOnWrite),
("Sort key", sortKeyName(p)),
("Wiki", wiki_md),
("Website", website_md),
],
)
topTables = generalInfoTable
try:
optionsType = {optName: opt.typ for optName, opt in optionsProp.items()}
except Exception:
print(f"{optionsProp = }")
raise
optionsComment = {
optName: opt.comment.replace("\n", "<br />")
for optName, opt in optionsProp.items()
}
readOptions = p.getReadOptions()
if readOptions:
topTables += "\n\n### Read options\n\n" + renderRWOptions(readOptions)
writeOptions = p.getWriteOptions()
if writeOptions:
topTables += "\n\n### Write options\n\n" + renderRWOptions(writeOptions)
toolsTable = ""
if tools:
toolsTable = "### Dictionary Applications/Tools\n\n" + renderTable(
[
(
"Name & Website",
"Source code",
"License",
"Platforms",
"Language",
),
]
+ [
(
f"[{tool['name']}]({tool['web']})",
getToolSourceLink(tool),
tool["license"],
", ".join(tool["platforms"]),
tool.get("plang", ""),
)
for tool in tools
],
)
text = template.render(
description=p.description,
codeValue=codeValue,
yesNo=yesNo,
topTables=topTables,
optionsProp=optionsProp,
readOptions=readOptions,
writeOptions=writeOptions,
optionsComment=optionsComment,
optionsType=optionsType,
readDependsLinks=readDependsLinks,
readDependsCmd=readDependsCmd,
writeDependsLinks=writeDependsLinks,
writeDependsCmd=writeDependsCmd,
extraDocs=extraDocs,
toolsTable=toolsTable,
)
for _i in range(3):
text = text.replace("\n\n\n", "\n\n")
if text.endswith("\n\n"):
text = text[:-1]
with open(
join(rootDir, "doc", "p", f"{p.lname}.md"),
mode="w",
encoding="utf-8",
newline="\n",
) as _file:
_file.write(text)
indexText = renderTable(
[("Description", "Name", "Doc Link")]
+ [
(
p.description,
p.name,
renderLink(f"{p.lname}.md", f"./{p.lname}.md"),
)
for p in plugins
],
)
with open(
join(rootDir, "doc", "p", "__index__.md"),
mode="w",
encoding="utf-8",
newline="\n",
) as _file:
_file.write(indexText + "\n")
| 7,430 | Python | .py | 292 | 22.739726 | 77 | 0.677036 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,898 | plist-to-json.py | ilius_pyglossary/scripts/plist-to-json.py |
#!/usr/bin/env python
import json
import sys
import biplist
plistPath = sys.argv[1]
try:
data = biplist.readPlist(plistPath)
except (biplist.InvalidPlistException, biplist.NotBinaryPlistException):
try:
import plistlib
with open(plistPath, mode="rb") as plist_file:
data = plistlib.loads(plist_file.read())
except Exception as e:
raise OSError(
"'Info.plist' file is malformed, "
f"Please provide 'Contents/' with a correct 'Info.plist'. {e}",
) from e
print(json.dumps(data, indent="\t", sort_keys=True))
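# Usage sketch (assumption, not part of the original file):
#   python plist-to-json.py Contents/Info.plist > Info.json
# biplist reads binary plists; the stdlib plistlib fallback covers XML plists.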
| 532 | Python | .py | 18 | 27.111111 | 72 | 0.75 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,899 | plugin-validate.py | ilius_pyglossary/scripts/plugin-validate.py |
#!/usr/bin/python3
import sys
from os.path import abspath, dirname
from pathlib import Path
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.core import userPluginsDir
from pyglossary.glossary import Glossary
Glossary.init(
usePluginsJson=False,
skipDisabledPlugins=False,
)
userPluginsDirPath = Path(userPluginsDir)
plugins = [
p for p in Glossary.plugins.values() if userPluginsDirPath not in p.path.parents
]
data = []
for p in plugins:
module = p.module
# print(module.__file__)
p.checkModule(module)
p.checkModuleMore(module)
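# Usage sketch (assumption, not part of the original file): run from the
# repository root, e.g. `python3 scripts/plugin-validate.py`; checkModule and
# checkModuleMore are expected to report plugin modules that lack required
# attributes.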
| 585 | Python | .py | 22 | 25 | 81 | 0.804309 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |